hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
d552bc3a62a08ecbb9ab63cf63c7a33c8ead5da0
16,393
//! `Starvation` is a private type used for blocking other threads in order to finish some work that //! was unable to be performed speculatively in a finite amount of time. //! //! `Progress` contains the logic of when to signal that a thread is starving, and waits for other //! threads that are starving. //! //! Everything in this file uses `Ordering::Relaxed` meaning that this is really just a backoff //! algorithm, and synchronization should be provided by other types. //! //! In the presence of a fair scheduler and bounded critical sections, these types guarantee //! progress of all threads. This gives blocking algorithms many of the properties of wait-free //! algorithms. //! //! http://raiith.iith.ac.in/3530/1/1709.01033.pdf //! //! Based on RawMutex in parking_lot. //! //! https://github.com/Amanieu/parking_lot use crate::{ internal::epoch::{QuiesceEpoch, EPOCH_CLOCK, TICK_SIZE}, stats, }; use core::{ cell::Cell, mem, ptr::NonNull, sync::atomic::{self, AtomicU8, AtomicUsize, Ordering::Relaxed}, }; use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, UnparkResult, UnparkToken}; use std::thread; /// If a thread started a transaction this many epochs ago, the thread will skip move directly into /// the `yield_now` phase of backoff. /// /// Lower values result in more serialization under contention. Higher values result in more wasted /// CPU cycles for large transactions. 
static MAX_ELAPSED_EPOCHS: AtomicUsize = AtomicUsize::new(0); // TODO: tinker with this value const EPOCH_BUFFER_ROOM: usize = 2; #[inline] pub fn inc_thread_estimate() { drop(MAX_ELAPSED_EPOCHS.fetch_add(TICK_SIZE * EPOCH_BUFFER_ROOM, Relaxed)); } #[inline] pub fn dec_thread_estimate() { drop(MAX_ELAPSED_EPOCHS.fetch_sub(TICK_SIZE * EPOCH_BUFFER_ROOM, Relaxed)); } #[inline] fn max_elapsed_epochs() -> usize { let result = MAX_ELAPSED_EPOCHS.load(Relaxed); debug_assert!(result > TICK_SIZE && result % TICK_SIZE == 0); result } const NO_STARVERS: usize = 0; const SPIN_LIMIT: u32 = 6; const YIELD_LIMIT: u32 = 10; const LOCKED_BIT: u8 = 1 << 0; const PARKED_BIT: u8 = 1 << 1; #[derive(Copy, Clone, Debug, PartialEq, Eq)] struct Token(NonNull<Progress>); impl Token { #[inline] fn new(raw: &Progress) -> Self { Token(raw.into()) } #[inline] fn park_token(self) -> ParkToken { ParkToken(self.0.as_ptr() as usize) } #[inline] fn unpark_token(self) -> UnparkToken { UnparkToken(self.0.as_ptr() as usize) } #[inline] fn from_park_token(park_token: ParkToken) -> Self { debug_assert!( park_token.0 != 0 && park_token.0 % mem::align_of::<Progress>() == 0, "ParkToken is not a valid pointer" ); // park tokens (in this file) are only ever created with valid Progress addresses. Token::new(unsafe { &*(park_token.0 as *mut Progress) }) } #[inline] unsafe fn as_ref(self) -> &'static Progress { &*self.0.cast().as_ptr() } } static STARVATION: Starvation = Starvation { state: AtomicU8::new(0), }; /// `Starvation` only uses `Relaxed` memory ` ordering. 
#[repr(align(64))] struct Starvation { state: AtomicU8, } impl Starvation { #[inline] fn starve_lock(&self, token: Token) { if self .state .compare_exchange_weak(0, LOCKED_BIT, Relaxed, Relaxed) .is_err() { self.starve_lock_slow(token); } } #[inline] fn starve_unlock<G: FnMut(Token) -> bool, U: FnOnce(Token)>( &self, should_upgrade: G, upgrade: U, ) { if self .state .compare_exchange(LOCKED_BIT, 0, Relaxed, Relaxed) .is_ok() { stats::blocked_by_starvation(0); return; } self.starve_unlock_slow(should_upgrade, upgrade); } #[inline] fn wait_for_starvers(&self, token: Token) { if unlikely!(self.state.load(Relaxed) != 0) { self.wait_for_starvers_slow(token) } } #[cold] #[inline(never)] fn starve_lock_slow(&self, token: Token) { let mut state = self.state.load(Relaxed); loop { if state == 0 { match self .state .compare_exchange_weak(0, LOCKED_BIT, Relaxed, Relaxed) { Ok(_) => return, Err(x) => state = x, } continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.state .compare_exchange_weak(state, state | PARKED_BIT, Relaxed, Relaxed) { state = x; continue; } } // Park our thread until we are woken up by an unlock let addr = self as *const _ as usize; let validate = || self.state.load(Relaxed) & PARKED_BIT != 0; let before_sleep = || {}; let timed_out = |_, _| {}; let park_token = token.park_token(); match unsafe { parking_lot_core::park(addr, validate, before_sleep, timed_out, park_token, None) } { ParkResult::Unparked(wakeup_token) => { debug_assert_eq!( wakeup_token, token.unpark_token(), "unfairly unparking a starving thread" ); debug_assert!( self.state.load(Relaxed) & LOCKED_BIT != 0, "improperly set the state before handing off starvation control" ); return; } ParkResult::Invalid => {} ParkResult::TimedOut => debug_assert!(false), } state = self.state.load(Relaxed); } } #[cold] #[inline(never)] fn wait_for_starvers_slow(&self, token: Token) { let mut state = self.state.load(Relaxed); loop { if state == 0 { return; } // Set the parked bit 
if state & PARKED_BIT == 0 { if let Err(x) = self.state .compare_exchange_weak(state, state | PARKED_BIT, Relaxed, Relaxed) { state = x; continue; } } // Park our thread until we are woken up by an unlock let addr = self as *const _ as usize; let validate = || self.state.load(Relaxed) & PARKED_BIT != 0; let before_sleep = || {}; let timed_out = |_, _| {}; match unsafe { parking_lot_core::park( addr, validate, before_sleep, timed_out, token.park_token(), None, ) } { ParkResult::Unparked(UnparkToken(NO_STARVERS)) => { return; } ParkResult::Unparked(wakeup_token) => { if wakeup_token == token.unpark_token() { // this thread has been upgraded to a starver debug_assert!( self.state.load(Relaxed) & LOCKED_BIT != 0, "improperly set the state before handing off starvation control" ); return; } // unparked before it was known there was another starving thread. } ParkResult::Invalid => {} ParkResult::TimedOut => debug_assert!(false), } state = self.state.load(Relaxed); } } #[cold] #[inline(never)] fn starve_unlock_slow<G: FnMut(Token) -> bool, U: FnOnce(Token)>( &self, mut should_upgrade: G, upgrade: U, ) { let addr = self as *const _ as usize; let next_starved_token = Cell::new(None); let next_starved_token = &next_starved_token; // We don't know what thread we wish to unpark until we finish filtering. This means that // threads will sometimes be unparked without the possibility of making progress. let filter = |token: ParkToken| { debug_assert!(token.0 != NO_STARVERS, "invalid ParkToken detected"); let next_starved = next_starved_token.get(); if next_starved.is_none() { let token = Token::from_park_token(token); if should_upgrade(token) { next_starved_token.set(Some(token)); } FilterOp::Unpark } else { // At this point, it's known we're handing off control to another starving thread. 
FilterOp::Stop } }; let callback = |unpark_result: UnparkResult| { debug_assert!( self.state.load(Relaxed) & LOCKED_BIT != 0, "`Starvation::starve_unlock_slow`: unexpectedly not locked" ); debug_assert!( unpark_result.unparked_threads == 0 || self.state.load(Relaxed) & PARKED_BIT != 0, "`Starvation::starve_unlock_slow`: park bit was not properly set" ); debug_assert!( next_starved_token.get().is_none() || unpark_result.unparked_threads > 0, "`Starvation::starve_unlock_slow`: detected a starvation handoff that does not \ unpark any threads" ); debug_assert!( !unpark_result.have_more_threads || next_starved_token.get().is_some(), "`Starvation::starve_unlock_slow`: no starvers remaining, but threads remain \ parked" ); let next_starved = next_starved_token.get(); if !unpark_result.have_more_threads { let next_state = if next_starved.is_some() { LOCKED_BIT } else { 0 }; self.state.store(next_state, Relaxed); } match next_starved { Some(next_starved) => { upgrade(next_starved); next_starved.unpark_token() } None => UnparkToken(NO_STARVERS), } }; let result = unsafe { parking_lot_core::unpark_filter(addr, filter, callback) }; if next_starved_token.get().is_some() { stats::starvation_handoff(); } stats::blocked_by_starvation(result.unparked_threads) } } #[derive(Debug, Copy, Clone)] enum ProgressImpl { NotStarving { first_failed_epoch: Option<QuiesceEpoch>, backoff: u32, }, Starving, } impl ProgressImpl { #[inline] const fn new() -> Self { ProgressImpl::NotStarving { first_failed_epoch: None, backoff: 0, } } #[inline] fn should_upgrade(&self) -> bool { match self { ProgressImpl::NotStarving { first_failed_epoch: Some(epoch), backoff, } => { if *backoff >= YIELD_LIMIT { return true; } let now = EPOCH_CLOCK.now().unwrap_or_else(|| abort!()); now.get().get() - epoch.get().get() >= max_elapsed_epochs() } ProgressImpl::NotStarving { first_failed_epoch: None, .. 
} => false, ProgressImpl::Starving => { debug_assert!(false); false } } } } pub struct Progress { /// The `Cell` here is actually accessed from multiple threads, but only while the "owning" /// thread is parked, and parking lots bucket locks are held. inner: Cell<ProgressImpl>, } #[cfg(debug_assertions)] impl Drop for Progress { fn drop(&mut self) { match self.inner.get() { ProgressImpl::NotStarving { first_failed_epoch: None, backoff: 0, } => {} inner => panic!( "`Progress` dropped without having made progress: {:?}", inner ), } } } impl Progress { #[inline] pub const fn new() -> Self { Progress { inner: Cell::new(ProgressImpl::new()), } } /// Called when a thread has failed either the optimistic phase of concurrency, or the /// pessimistic phase of concurrency. #[cold] pub fn failed_to_progress(&self, epoch: QuiesceEpoch) { // TODO: can this be golfed, and/or write to less memory? match self.inner.get() { ProgressImpl::NotStarving { first_failed_epoch, backoff, } => { if backoff <= YIELD_LIMIT { let first_failed_epoch = first_failed_epoch.unwrap_or(epoch); if backoff <= SPIN_LIMIT { if epoch.get().get() - first_failed_epoch.get().get() >= max_elapsed_epochs() { // long transaction detected, `spin_loop_hint` is probably a bad backoff // strategy. self.inner.set(ProgressImpl::NotStarving { first_failed_epoch: Some(first_failed_epoch), backoff: SPIN_LIMIT + 1, }); thread::yield_now(); return; } else { for _ in 0..1 << backoff { atomic::spin_loop_hint(); } } } else { thread::yield_now(); } self.inner.set(ProgressImpl::NotStarving { first_failed_epoch: Some(first_failed_epoch), backoff: backoff + 1, }); } else { thread::yield_now(); STARVATION.starve_lock(Token::new(self)); self.inner.set(ProgressImpl::Starving) } } ProgressImpl::Starving => { // There might be a few straggler threads that were in the middle of a commit when // this thread signaled it was starving. Rare, but in that scenario we can commit // twice, and some backoff is probably warranted. 
thread::yield_now(); } }; } /// Called when a thread has finished the optimistic phase of concurrency, and is about to enter /// a pessimistic phase where the threads progress will be published. #[inline] pub fn wait_for_starvers(&self) { match self.inner.get() { ProgressImpl::NotStarving { .. } => STARVATION.wait_for_starvers(Token::new(self)), ProgressImpl::Starving => {} }; } /// Called after progress has been made. #[inline] pub fn progressed(&self) { match self.inner.get() { ProgressImpl::NotStarving { first_failed_epoch: None, .. } => return, _ => {} } self.progressed_slow() } #[inline(never)] #[cold] fn progressed_slow(&self) { match self.inner.get() { ProgressImpl::NotStarving { .. } => {} ProgressImpl::Starving => unsafe { STARVATION.starve_unlock( |this| this.as_ref().inner.get().should_upgrade(), |this| this.as_ref().inner.set(ProgressImpl::Starving), ); }, }; self.inner.set(ProgressImpl::new()); } }
33.117172
100
0.512597
754391a284ddfd9a415f28ca9be2bce64063341d
7,014
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(deprecated)] use core::ptr::*; use libc::c_char; use core::mem; use libc; use std::c_str::CString; #[test] fn test() { unsafe { struct Pair { fst: int, snd: int }; let mut p = Pair {fst: 10, snd: 20}; let pptr: *mut Pair = &mut p; let iptr: *mut int = mem::transmute(pptr); assert_eq!(*iptr, 10); *iptr = 30; assert_eq!(*iptr, 30); assert_eq!(p.fst, 30); *pptr = Pair {fst: 50, snd: 60}; assert_eq!(*iptr, 50); assert_eq!(p.fst, 50); assert_eq!(p.snd, 60); let v0 = vec![32000u16, 32001u16, 32002u16]; let mut v1 = vec![0u16, 0u16, 0u16]; copy_memory(v1.as_mut_ptr().offset(1), v0.as_ptr().offset(1), 1); assert!((*v1.get(0) == 0u16 && *v1.get(1) == 32001u16 && *v1.get(2) == 0u16)); copy_memory(v1.as_mut_ptr(), v0.as_ptr().offset(2), 1); assert!((*v1.get(0) == 32002u16 && *v1.get(1) == 32001u16 && *v1.get(2) == 0u16)); copy_memory(v1.as_mut_ptr().offset(2), v0.as_ptr(), 1u); assert!((*v1.get(0) == 32002u16 && *v1.get(1) == 32001u16 && *v1.get(2) == 32000u16)); } } #[test] fn test_position() { use libc::c_char; "hello".with_c_str(|p| { unsafe { assert!(2u == position(p, |c| *c == 'l' as c_char)); assert!(4u == position(p, |c| *c == 'o' as c_char)); assert!(5u == position(p, |c| *c == 0 as c_char)); } }) } #[test] fn test_buf_len() { "hello".with_c_str(|p0| { "there".with_c_str(|p1| { "thing".with_c_str(|p2| { let v = vec![p0, p1, p2, null()]; unsafe { assert_eq!(buf_len(v.as_ptr()), 3u); } }) }) }) } #[test] fn test_is_null() { let p: *const int = null(); assert!(p.is_null()); assert!(!p.is_not_null()); let 
q = unsafe { p.offset(1) }; assert!(!q.is_null()); assert!(q.is_not_null()); let mp: *mut int = mut_null(); assert!(mp.is_null()); assert!(!mp.is_not_null()); let mq = unsafe { mp.offset(1) }; assert!(!mq.is_null()); assert!(mq.is_not_null()); } #[test] fn test_as_ref() { unsafe { let p: *const int = null(); assert_eq!(p.as_ref(), None); let q: *const int = &2; assert_eq!(q.as_ref().unwrap(), &2); let p: *mut int = mut_null(); assert_eq!(p.as_ref(), None); let q: *mut int = &mut 2; assert_eq!(q.as_ref().unwrap(), &2); // Lifetime inference let u = 2i; { let p: *const int = &u as *const _; assert_eq!(p.as_ref().unwrap(), &2); } } } #[test] fn test_as_mut() { unsafe { let p: *mut int = mut_null(); assert!(p.as_mut() == None); let q: *mut int = &mut 2; assert!(q.as_mut().unwrap() == &mut 2); // Lifetime inference let mut u = 2i; { let p: *mut int = &mut u as *mut _; assert!(p.as_mut().unwrap() == &mut 2); } } } #[test] fn test_ptr_addition() { unsafe { let xs = Vec::from_elem(16, 5i); let mut ptr = xs.as_ptr(); let end = ptr.offset(16); while ptr < end { assert_eq!(*ptr, 5); ptr = ptr.offset(1); } let mut xs_mut = xs; let mut m_ptr = xs_mut.as_mut_ptr(); let m_end = m_ptr.offset(16); while m_ptr < m_end { *m_ptr += 5; m_ptr = m_ptr.offset(1); } assert!(xs_mut == Vec::from_elem(16, 10i)); } } #[test] fn test_ptr_subtraction() { unsafe { let xs = vec![0,1,2,3,4,5,6,7,8,9]; let mut idx = 9i8; let ptr = xs.as_ptr(); while idx >= 0i8 { assert_eq!(*(ptr.offset(idx as int)), idx as int); idx = idx - 1i8; } let mut xs_mut = xs; let m_start = xs_mut.as_mut_ptr(); let mut m_ptr = m_start.offset(9); while m_ptr >= m_start { *m_ptr += *m_ptr; m_ptr = m_ptr.offset(-1); } assert!(xs_mut == vec![0,2,4,6,8,10,12,14,16,18]); } } #[test] fn test_ptr_array_each_with_len() { unsafe { let one = "oneOne".to_c_str(); let two = "twoTwo".to_c_str(); let three = "threeThree".to_c_str(); let arr = vec![ one.as_ptr(), two.as_ptr(), three.as_ptr() ]; let expected_arr = [ one, two, three ]; let 
mut ctr = 0; let mut iteration_count = 0; array_each_with_len(arr.as_ptr(), arr.len(), |e| { let actual = CString::new(e, false); assert_eq!(actual.as_str(), expected_arr[ctr].as_str()); ctr += 1; iteration_count += 1; }); assert_eq!(iteration_count, 3u); } } #[test] fn test_ptr_array_each() { unsafe { let one = "oneOne".to_c_str(); let two = "twoTwo".to_c_str(); let three = "threeThree".to_c_str(); let arr = vec![ one.as_ptr(), two.as_ptr(), three.as_ptr(), // fake a null terminator null() ]; let expected_arr = [ one, two, three ]; let arr_ptr = arr.as_ptr(); let mut ctr = 0u; let mut iteration_count = 0u; array_each(arr_ptr, |e| { let actual = CString::new(e, false); assert_eq!(actual.as_str(), expected_arr[ctr].as_str()); ctr += 1; iteration_count += 1; }); assert_eq!(iteration_count, 3); } } #[test] #[should_fail] fn test_ptr_array_each_with_len_null_ptr() { unsafe { array_each_with_len(0 as *const *const libc::c_char, 1, |e| { CString::new(e, false).as_str().unwrap(); }); } } #[test] #[should_fail] fn test_ptr_array_each_null_ptr() { unsafe { array_each(0 as *const *const libc::c_char, |e| { CString::new(e, false).as_str().unwrap(); }); } } #[test] fn test_set_memory() { let mut xs = [0u8, ..20]; let ptr = xs.as_mut_ptr(); unsafe { set_memory(ptr, 5u8, xs.len()); } assert!(xs == [5u8, ..20]); }
25.139785
72
0.485458
690f0ad7b856c56acaa1eb8a856bffbbefbe9a5e
1,822
// Silence some warnings so they don't distract from the exercise. #![allow(unused_variables)] fn main() { let width = 4; let height = 7; let depth = 10; // 1. Try running this code with `cargo run` and take a look at the error. // // See if you can fix the error. It is right around here, somewhere. If you succeed, then // doing `cargo run` should succeed and print something out. let area = area_of(width, height); println!("Area is {}", area); // 2. The area that was calculated is not correct! Go fix the area_of() function below, then run // the code again and make sure it worked (you should get an area of 28). // 3. Uncomment the line below. It doesn't work yet because the `volume` function doesn't exist. // Create the `volume` function! It should: // - Take three arguments of type i32 // - Multiply the three arguments together // - Return the result (which should be 280 when you run the program). // // If you get stuck, remember that this is *very* similar to what `area_of` does. // println!("Volume is {}", volume(width, height, depth)); fn volume(w: i32, h: i32, d: i32) -> i32 { w * h * d } fn area_of(x: i32, y: i32) -> i32 { // 2a. Fix this function to correctly compute the area of a rectangle given // dimensions x and y by multiplying x and y and returning the result. // x * y // Challenge: It isn't idiomatic (the normal way a Rust programmer would do things) to use // `return` on the last line of a function. Change the last line to be a // "tail expression" that returns a value without using `return`. // Hint: `cargo clippy` will warn you about this exact thing. } }
43.380952
101
0.618002
7ac4073d45868fe3f2dc312a7f082e3fe50066e5
4,906
// Copyright 2018-2020, Wayfair GmbH // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Kafka Offramp //! //! The `kafka` offramp allows persisting events to a kafka queue. //! //! ## Configuration //! //! See [Config](struct.Config.html) for details. use crate::offramp::prelude::*; use halfbrown::HashMap; use rdkafka::config::ClientConfig; use rdkafka::producer::{FutureProducer, FutureRecord}; use std::fmt; #[derive(Deserialize)] pub struct Config { /// list of brokers pub brokers: Vec<String>, /// the topic to send to pub topic: String, /// the number of threads in the async worker pool handling writing to kafka (default: 4) #[serde(default = "dflt::d_4")] pub threads: usize, /// a map (string keys and string values) of [librdkafka options](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) (default: None) - Note this can overwrite default settings. 
/// /// Default settings for librdkafka: /// /// * `client.id` - `"tremor-<hostname>-0"` /// * `bootstrap.servers` - `brokers` from the config concatinated by `,` /// * `message.timeout.ms` - `"5000"` /// * `queue.buffering.max.ms` - `"0"` - don't buffer for lower latency (high) #[serde(default = "dflt::d")] pub rdkafka_options: HashMap<String, String>, /// hostname to use, defaults to the hostname of the system #[serde(default = "d_host")] pub hostname: String, #[serde(default = "dflt::d")] pub key: Option<String>, } impl ConfigImpl for Config {} fn d_host() -> String { hostname() } /// Kafka offramp connectoz pub struct Kafka { producer: FutureProducer, topic: String, key: Option<String>, pipelines: HashMap<TremorURL, pipeline::Addr>, postprocessors: Postprocessors, } impl fmt::Debug for Kafka { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Kafka: {}", self.topic) } } impl offramp::Impl for Kafka { fn from_config(config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> { if let Some(config) = config { let config: Config = Config::new(config)?; let mut producer_config = ClientConfig::new(); let producer_config = producer_config .set("client.id", &format!("tremor-{}-{}", config.hostname, 0)) .set("bootstrap.servers", &config.brokers.join(",")) .set("message.timeout.ms", "5000") .set("queue.buffering.max.ms", "0"); let producer = config .rdkafka_options .iter() .fold(producer_config, |c: &mut ClientConfig, (k, v)| c.set(k, v)) .create()?; let key = config.key.clone(); // Create the thread pool where the expensive computation will be performed. 
Ok(Box::new(Self { producer, topic: config.topic, pipelines: HashMap::new(), postprocessors: vec![], key, })) } else { Err("Kafka offramp requires a config".into()) } } } impl Offramp for Kafka { // TODO fn on_event(&mut self, codec: &Box<dyn Codec>, _input: String, event: Event) -> Result<()> { for value in event.value_iter() { let raw = codec.encode(value)?; let mut record = FutureRecord::to(&self.topic); record = record.payload(&raw); //TODO: Key let record = if let Some(ref k) = self.key { record.key(k.as_str()) } else { record }; match self.producer.send_result(record) { Ok(f) => { task::spawn(f); } Err((e, _)) => error!("[Kafka Offramp] failed to enque message: {}", e), } } Ok(()) } fn add_pipeline(&mut self, id: TremorURL, addr: pipeline::Addr) { self.pipelines.insert(id, addr); } fn remove_pipeline(&mut self, id: TremorURL) -> bool { self.pipelines.remove(&id); self.pipelines.is_empty() } fn default_codec(&self) -> &str { "json" } fn start(&mut self, _codec: &Box<dyn Codec>, postprocessors: &[String]) -> Result<()> { self.postprocessors = make_postprocessors(postprocessors)?; Ok(()) } }
33.60274
198
0.582144
efb0807c0a0e750d85643c704bff21838c94b0b6
695,938
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 // This file was generated. Do not modify! // // To update this code, run: `cargo run --release -p diem-framework`. //! Conversion library between a structured representation of a Move script call (`ScriptCall`) and the //! standard BCS-compatible representation used in Diem transactions (`Script`). //! //! This code was generated by compiling known Script interfaces ("ABIs") with the tool `transaction-builder-generator`. #![allow(clippy::unnecessary_wraps)] #![allow(unused_imports)] use diem_types::{ account_address::AccountAddress, transaction::{Script, ScriptFunction, TransactionArgument, TransactionPayload}, }; use move_core_types::{ ident_str, language_storage::{ModuleId, TypeTag}, }; use std::collections::BTreeMap as Map; type Bytes = Vec<u8>; /// Structured representation of a call into a known Move script. /// ```ignore /// impl ScriptCall { /// pub fn encode(self) -> Script { .. } /// pub fn decode(&Script) -> Option<ScriptCall> { .. } /// } /// ``` #[derive(Clone, Debug, PartialEq, PartialOrd)] #[cfg_attr(feature = "fuzzing", derive(proptest_derive::Arbitrary))] #[cfg_attr(feature = "fuzzing", proptest(no_params))] pub enum ScriptCall { /// # Summary /// Adds a zero `Currency` balance to the sending `account`. This will enable `account` to /// send, receive, and hold `Diem::Diem<Currency>` coins. This transaction can be /// successfully sent by any account that is allowed to hold balances /// (e.g., VASP, Designated Dealer). /// /// # Technical Description /// After the successful execution of this transaction the sending account will have a /// `DiemAccount::Balance<Currency>` resource with zero balance published under it. Only /// accounts that can hold balances can send this transaction, the sending account cannot /// already have a `DiemAccount::Balance<Currency>` published under it. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being added to the sending account of the transaction. `Currency` must be an already-registered currency on-chain. | /// | `account` | `&signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EROLE_CANT_STORE_BALANCE` | The sending `account`'s role does not permit balances. | /// | `Errors::ALREADY_PUBLISHED` | `DiemAccount::EADD_EXISTING_CURRENCY` | A balance for `Currency` is already published under the sending `account`. | /// /// # Related Scripts /// * `Script::create_child_vasp_account` /// * `Script::create_parent_vasp_account` /// * `Script::peer_to_peer_with_metadata` AddCurrencyToAccount { currency: TypeTag }, /// # Summary /// Stores the sending accounts ability to rotate its authentication key with a designated recovery /// account. Both the sending and recovery accounts need to belong to the same VASP and /// both be VASP accounts. After this transaction both the sending account and the /// specified recovery account can rotate the sender account's authentication key. /// /// # Technical Description /// Adds the `DiemAccount::KeyRotationCapability` for the sending account /// (`to_recover_account`) to the `RecoveryAddress::RecoveryAddress` resource under /// `recovery_address`. After this transaction has been executed successfully the account at /// `recovery_address` and the `to_recover_account` may rotate the authentication key of /// `to_recover_account` (the sender of this transaction). 
/// /// The sending account of this transaction (`to_recover_account`) must not have previously given away its unique key /// rotation capability, and must be a VASP account. The account at `recovery_address` /// must also be a VASP account belonging to the same VASP as the `to_recover_account`. /// Additionally the account at `recovery_address` must have already initialized itself as /// a recovery account address using the `Script::create_recovery_address` transaction script. /// /// The sending account's (`to_recover_account`) key rotation capability is /// removed in this transaction and stored in the `RecoveryAddress::RecoveryAddress` /// resource stored under the account at `recovery_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `to_recover_account` | `&signer` | The signer reference of the sending account of this transaction. | /// | `recovery_address` | `address` | The account address where the `to_recover_account`'s `DiemAccount::KeyRotationCapability` will be stored. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `to_recover_account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EINVALID_KEY_ROTATION_DELEGATION` | `to_recover_account` and `recovery_address` do not belong to the same VASP. | /// | `Errors::LIMIT_EXCEEDED` | ` RecoveryAddress::EMAX_KEYS_REGISTERED` | `RecoveryAddress::MAX_REGISTERED_KEYS` have already been registered with this `recovery_address`. 
| /// /// # Related Scripts /// * `Script::create_recovery_address` /// * `Script::rotate_authentication_key_with_recovery_address` AddRecoveryRotationCapability { recovery_address: AccountAddress }, /// # Summary /// Adds a validator account to the validator set, and triggers a /// reconfiguration of the system to admit the account to the validator set for the system. This /// transaction can only be successfully called by the Diem Root account. /// /// # Technical Description /// This script adds the account at `validator_address` to the validator set. /// This transaction emits a `DiemConfig::NewEpochEvent` event and triggers a /// reconfiguration. Once the reconfiguration triggered by this script's /// execution has been performed, the account at the `validator_address` is /// considered to be a validator in the network. /// /// This transaction script will fail if the `validator_address` address is already in the validator set /// or does not have a `ValidatorConfig::ValidatorConfig` resource already published under it. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be added to the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EINVALID_PROSPECTIVE_VALIDATOR` | The validator to be added does not have a `ValidatorConfig::ValidatorConfig` resource published under it, or its `config` field is empty. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EALREADY_A_VALIDATOR` | The `validator_address` account is already a registered validator. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` AddValidatorAndReconfigure { sliding_nonce: u64, validator_name: Bytes, validator_address: AccountAddress, }, /// # Summary /// Burns all coins held in the preburn resource at the specified /// preburn address and removes them from the system. The sending account must /// be the Treasury Compliance account. 
/// The account that holds the preburn resource will normally be a Designated /// Dealer, but there are no enforced requirements that it be one. /// /// # Technical Description /// This transaction permanently destroys all the coins of `Token` type /// stored in the `Diem::Preburn<Token>` resource published under the /// `preburn_address` account address. /// /// This transaction will only succeed if the sending `account` has a /// `Diem::BurnCapability<Token>`, and a `Diem::Preburn<Token>` resource /// exists under `preburn_address`, with a non-zero `to_burn` field. After the successful execution /// of this transaction the `total_value` field in the /// `Diem::CurrencyInfo<Token>` resource published under `0xA550C18` will be /// decremented by the value of the `to_burn` field of the preburn resource /// under `preburn_address` immediately before this transaction, and the /// `to_burn` field of the preburn resource will have a zero value. /// /// ## Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<Token>` resource's `burn_events` published under /// `0xA550C18`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being burned. `Token` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | The account at `preburn_address` does not have a `Diem::Preburn<Token>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_EMPTY` | The `Diem::Preburn<Token>` resource is empty (has a value of 0). | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// /// # Related Scripts /// * `Script::burn_txn_fees` /// * `Script::cancel_burn` /// * `Script::preburn` Burn { token: TypeTag, sliding_nonce: u64, preburn_address: AccountAddress, }, /// # Summary /// Burns the transaction fees collected in the `CoinType` currency so that the /// Diem association may reclaim the backing coins off-chain. May only be sent /// by the Treasury Compliance account. /// /// # Technical Description /// Burns the transaction fees collected in `CoinType` so that the /// association may reclaim the backing coins. Once this transaction has executed /// successfully all transaction fees that will have been collected in /// `CoinType` since the last time this script was called with that specific /// currency. 
Both `balance` and `preburn` fields in the /// `TransactionFee::TransactionFee<CoinType>` resource published under the `0xB1E55ED` /// account address will have a value of 0 after the successful execution of this script. /// /// ## Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<CoinType>` resource's `burn_events` published under /// `0xA550C18`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being added to the sending account of the transaction. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `TransactionFee::ETRANSACTION_FEE` | `CoinType` is not an accepted transaction fee currency. | /// | `Errors::INVALID_ARGUMENT` | `Diem::ECOIN` | The collected fees in `CoinType` are zero. | /// /// # Related Scripts /// * `Script::burn` /// * `Script::cancel_burn` BurnTxnFees { coin_type: TypeTag }, /// # Summary /// Cancels and returns all coins held in the preburn area under /// `preburn_address` and returns the funds to the `preburn_address`'s balance. /// Can only be successfully sent by an account with Treasury Compliance role. /// /// # Technical Description /// Cancels and returns all coins held in the `Diem::Preburn<Token>` resource under the `preburn_address` and /// return the funds to the `preburn_address` account's `DiemAccount::Balance<Token>`. 
/// The transaction must be sent by an `account` with a `Diem::BurnCapability<Token>` /// resource published under it. The account at `preburn_address` must have a /// `Diem::Preburn<Token>` resource published under it, and its value must be nonzero. The transaction removes /// the entire balance held in the `Diem::Preburn<Token>` resource, and returns it back to the account's /// `DiemAccount::Balance<Token>` under `preburn_address`. Due to this, the account at /// `preburn_address` must already have a balance in the `Token` currency published /// before this script is called otherwise the transaction will fail. /// /// ## Events /// The successful execution of this transaction will emit: /// * A `Diem::CancelBurnEvent` on the event handle held in the `Diem::CurrencyInfo<Token>` /// resource's `burn_events` published under `0xA550C18`. /// * A `DiemAccount::ReceivedPaymentEvent` on the `preburn_address`'s /// `DiemAccount::DiemAccount` `received_events` event handle with both the `payer` and `payee` /// being `preburn_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency that burning is being cancelled for. `Token` must be an already-registered currency on-chain. | /// | `account` | `&signer` | The signer reference of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. 
| /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | The account at `preburn_address` does not have a `Diem::Preburn<Token>` resource published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO` | The value held in the preburn resource was zero. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | The account at `preburn_address` doesn't have a balance resource for `Token`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds held in the preburn area would exceed the `account`'s account limits. | /// | `Errors::INVALID_STATE` | `DualAttestation::EPAYEE_COMPLIANCE_KEY_NOT_SET` | The `account` does not have a compliance key set on it but dual attestation checking was performed. | /// /// # Related Scripts /// * `Script::burn_txn_fees` /// * `Script::burn` /// * `Script::preburn` CancelBurn { token: TypeTag, preburn_address: AccountAddress, }, /// # Summary /// Creates a Child VASP account with its parent being the sending account of the transaction. /// The sender of the transaction must be a Parent VASP account. /// /// # Technical Description /// Creates a `ChildVASP` account for the sender `parent_vasp` at `child_address` with a balance of /// `child_initial_balance` in `CoinType` and an initial authentication key of /// `auth_key_prefix | child_address`. /// /// If `add_all_currencies` is true, the child address will have a zero balance in all available /// currencies in the system. /// /// The new account will be a child account of the transaction sender, which must be a /// Parent VASP account. The child account will be recorded against the limit of /// child accounts of the creating Parent VASP account. 
/// /// ## Events /// Successful execution with a `child_initial_balance` greater than zero will emit: /// * A `DiemAccount::SentPaymentEvent` with the `payer` field being the Parent VASP's address, /// and payee field being `child_address`. This is emitted on the Parent VASP's /// `DiemAccount::DiemAccount` `sent_events` handle. /// * A `DiemAccount::ReceivedPaymentEvent` with the `payer` field being the Parent VASP's address, /// and payee field being `child_address`. This is emitted on the new Child VASPS's /// `DiemAccount::DiemAccount` `received_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` that the child account should be created with. `CoinType` must be an already-registered currency on-chain. | /// | `parent_vasp` | `&signer` | The signer reference of the sending account. Must be a Parent VASP account. | /// | `child_address` | `address` | Address of the to-be-created Child VASP account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. | /// | `child_initial_balance` | `u64` | The initial balance in `CoinType` to give the child account when it's created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | The `auth_key_prefix` was not of length 32. | /// | `Errors::REQUIRES_ROLE` | `Roles::EPARENT_VASP` | The sending account wasn't a Parent VASP account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `child_address` address is already taken. 
| /// | `Errors::LIMIT_EXCEEDED` | `VASP::ETOO_MANY_CHILDREN` | The sending account has reached the maximum number of allowed child accounts. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. | /// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for the sending account has already been extracted. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | The sending account doesn't have a balance in `CoinType`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | The sending account doesn't have at least `child_initial_balance` of `CoinType` balance. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECANNOT_CREATE_AT_VM_RESERVED` | The `child_address` is the reserved address 0x0. | /// /// # Related Scripts /// * `Script::create_parent_vasp_account` /// * `Script::add_currency_to_account` /// * `Script::rotate_authentication_key` /// * `Script::add_recovery_rotation_capability` /// * `Script::create_recovery_address` CreateChildVaspAccount { coin_type: TypeTag, child_address: AccountAddress, auth_key_prefix: Bytes, add_all_currencies: bool, child_initial_balance: u64, }, /// # Summary /// Creates a Designated Dealer account with the provided information, and initializes it with /// default mint tiers. The transaction can only be sent by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Designated Dealer role at `addr` with authentication key /// `auth_key_prefix` | `addr` and a 0 balance of type `Currency`. If `add_all_currencies` is true, /// 0 balances for all available currencies in the system will also be added. This can only be /// invoked by an account with the TreasuryCompliance role. 
/// /// At the time of creation the account is also initialized with default mint tiers of (500_000, /// 5000_000, 50_000_000, 500_000_000), and preburn areas for each currency that is added to the /// account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` that the Designated Dealer should be initialized with. `Currency` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `addr` | `address` | Address of the to-be-created Designated Dealer account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Designated Dealer. | /// | `add_all_currencies` | `bool` | Whether to publish preburn, balance, and tier info resources for all known (SCS) currencies or just `Currency` when the account is created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. 
| /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `addr` address is already taken. | /// /// # Related Scripts /// * `Script::tiered_mint` /// * `Script::peer_to_peer_with_metadata` /// * `Script::rotate_dual_attestation_info` CreateDesignatedDealer { currency: TypeTag, sliding_nonce: u64, addr: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, add_all_currencies: bool, }, /// # Summary /// Creates a Parent VASP account with the specified human name. Must be called by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Parent VASP role at `address` with authentication key /// `auth_key_prefix` | `new_account_address` and a 0 balance of type `CoinType`. If /// `add_all_currencies` is true, 0 balances for all available currencies in the system will /// also be added. This can only be invoked by an TreasuryCompliance account. /// `sliding_nonce` is a unique nonce for operation, see `SlidingNonce` for details. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` currency that the Parent VASP account should be initialized with. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Parent VASP account. 
| /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Parent VASP. | /// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. 
| /// /// # Related Scripts /// * `Script::create_child_vasp_account` /// * `Script::add_currency_to_account` /// * `Script::rotate_authentication_key` /// * `Script::add_recovery_rotation_capability` /// * `Script::create_recovery_address` /// * `Script::rotate_dual_attestation_info` CreateParentVaspAccount { coin_type: TypeTag, sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, add_all_currencies: bool, }, /// # Summary /// Initializes the sending account as a recovery address that may be used by /// the VASP that it belongs to. The sending account must be a VASP account. /// Multiple recovery addresses can exist for a single VASP, but accounts in /// each must be disjoint. /// /// # Technical Description /// Publishes a `RecoveryAddress::RecoveryAddress` resource under `account`. It then /// extracts the `DiemAccount::KeyRotationCapability` for `account` and adds /// it to the resource. After the successful execution of this transaction /// other accounts may add their key rotation to this resource so that `account` /// may be used as a recovery account for those accounts. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ENOT_A_VASP` | `account` is not a VASP account. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EKEY_ROTATION_DEPENDENCY_CYCLE` | A key rotation recovery cycle would be created by adding `account`'s key rotation capability. 
| /// | `Errors::ALREADY_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | A `RecoveryAddress::RecoveryAddress` resource has already been published under `account`. | /// /// # Related Scripts /// * `Script::add_recovery_rotation_capability` /// * `Script::rotate_authentication_key_with_recovery_address` CreateRecoveryAddress {}, /// # Summary /// Creates a Validator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorConfig::ValidatorConfig` resource with empty `config`, and /// `operator_account` fields. The `human_name` field of the /// `ValidatorConfig::ValidatorConfig` is set to the passed in `human_name`. /// This script does not add the validator to the validator set or the system, /// but only creates the account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `Script::add_validator_and_reconfigure` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` CreateValidatorAccount { sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, }, /// # Summary /// Creates a Validator Operator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator Operator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorOperatorConfig::ValidatorOperatorConfig` resource with the specified `human_name`. /// This script does not assign the validator operator to any validator accounts but only creates the account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. 
| /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::add_validator_and_reconfigure` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` CreateValidatorOperatorAccount { sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, }, /// # Summary /// Freezes the account at `address`. 
The sending account of this transaction /// must be the Treasury Compliance account. The account being frozen cannot be /// the Diem Root or Treasury Compliance account. After the successful /// execution of this transaction no transactions may be sent from the frozen /// account, and the frozen account may not send or receive coins. /// /// # Technical Description /// Sets the `AccountFreezing::FreezingBit` to `true` and emits a /// `AccountFreezing::FreezeAccountEvent`. The transaction sender must be the /// Treasury Compliance account, but the account at `to_freeze_account` must /// not be either `0xA550C18` (the Diem Root address), or `0xB1E55ED` (the /// Treasury Compliance address). Note that this is a per-account property /// e.g., freezing a Parent VASP will not affect the status of any of its child /// accounts and vice versa. /// /// ## Events /// Successful execution of this transaction will emit a `AccountFreezing::FreezeAccountEvent` on /// the `freeze_event_handle` held in the `AccountFreezing::FreezeEventsHolder` resource published /// under `0xA550C18` with the `frozen_address` being the `to_freeze_account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `to_freeze_account` | `address` | The account address to be frozen. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_TC` | `to_freeze_account` was the Treasury Compliance account (`0xB1E55ED`). | /// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_DIEM_ROOT` | `to_freeze_account` was the Diem Root account (`0xA550C18`). | /// /// # Related Scripts /// * `Script::unfreeze_account` FreezeAccount { sliding_nonce: u64, to_freeze_account: AccountAddress, }, /// # Summary /// Transfers a given number of coins in a specified currency from one account to another. /// Transfers over a specified amount defined on-chain that are between two different VASPs, or /// other accounts that have opted-in will be subject to on-chain checks to ensure the receiver has /// agreed to receive the coins. This transaction can be sent by any account that can hold a /// balance, and to any account that can hold a balance. Both accounts must hold balances in the /// currency being transacted. /// /// # Technical Description /// /// Transfers `amount` coins of type `Currency` from `payer` to `payee` with (optional) associated /// `metadata` and an (optional) `metadata_signature` on the message /// `metadata` | `Signer::address_of(payer)` | `amount` | `DualAttestation::DOMAIN_SEPARATOR`. /// The `metadata` and `metadata_signature` parameters are only required if `amount` >= /// `DualAttestation::get_cur_microdiem_limit` XDX and `payer` and `payee` are distinct VASPs. 
/// However, a transaction sender can opt in to dual attestation even when it is not required /// (e.g., a DesignatedDealer -> VASP payment) by providing a non-empty `metadata_signature`. /// Standardized `metadata` BCS format can be found in `diem_types::transaction::metadata::Metadata`. /// /// ## Events /// Successful execution of this script emits two events: /// * A `DiemAccount::SentPaymentEvent` on `payer`'s `DiemAccount::DiemAccount` `sent_events` handle; and /// * A `DiemAccount::ReceivedPaymentEvent` on `payee`'s `DiemAccount::DiemAccount` `received_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being sent in this transaction. `Currency` must be an already-registered currency on-chain. | /// | `payer` | `&signer` | The signer reference of the sending account that coins are being transferred from. | /// | `payee` | `address` | The address of the account the coins are being transferred to. | /// | `metadata` | `vector<u8>` | Optional metadata about this payment. | /// | `metadata_signature` | `vector<u8>` | Optional signature over `metadata` and payment information. See | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `payer` doesn't hold a balance in `Currency`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Currency`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO` | `amount` is zero. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYEE_DOES_NOT_EXIST` | No account exists at the `payee` address. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | An account exists at `payee`, but it does not accept payments in `Currency`. 
| /// | `Errors::INVALID_STATE` | `AccountFreezing::EACCOUNT_FROZEN` | The `payee` account is frozen. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EMALFORMED_METADATA_SIGNATURE` | `metadata_signature` is not 64 bytes. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_METADATA_SIGNATURE` | `metadata_signature` does not verify against the `payee`'s `DualAttestation::Credential` `compliance_public_key` public key. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EWITHDRAWAL_EXCEEDS_LIMITS` | `payer` has exceeded its daily withdrawal limits for the backing coins of XDX. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | `payee` has exceeded its daily deposit limits for XDX. | /// /// # Related Scripts /// * `Script::create_child_vasp_account` /// * `Script::create_parent_vasp_account` /// * `Script::add_currency_to_account` PeerToPeerWithMetadata { currency: TypeTag, payee: AccountAddress, amount: u64, metadata: Bytes, metadata_signature: Bytes, }, /// # Summary /// Moves a specified number of coins in a given currency from the account's /// balance to its preburn area after which the coins may be burned. This /// transaction may be sent by any account that holds a balance and preburn area /// in the specified currency. /// /// # Technical Description /// Moves the specified `amount` of coins in `Token` currency from the sending `account`'s /// `DiemAccount::Balance<Token>` to the `Diem::Preburn<Token>` published under the same /// `account`. `account` must have both of these resources published under it at the start of this /// transaction in order for it to execute successfully. 
/// /// ## Events /// Successful execution of this script emits two events: /// * `DiemAccount::SentPaymentEvent` on `account`'s `DiemAccount::DiemAccount` `sent_events` /// handle with the `payee` and `payer` fields being `account`'s address; and /// * A `Diem::PreburnEvent` with `Token`'s currency code on the /// `Diem::CurrencyInfo<Token>`'s `preburn_events` handle for `Token` and with /// `preburn_address` set to `account`'s address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being moved to the preburn area. `Token` must be an already-registered currency on-chain. | /// | `account` | `&signer` | The signer reference of the sending account. | /// | `amount` | `u64` | The amount in `Token` to be moved to the preburn area. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for `account` has already been extracted. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `account` doesn't hold a balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | `account` doesn't have a `Diem::Preburn<Token>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_OCCUPIED` | The `value` field in the `Diem::Preburn<Token>` resource under the sender is non-zero. | /// | `Errors::NOT_PUBLISHED` | `Roles::EROLE_ID` | The `account` did not have a role assigned to it. 
| /// | `Errors::REQUIRES_ROLE` | `Roles::EDESIGNATED_DEALER` | The `account` did not have the role of DesignatedDealer. | /// /// # Related Scripts /// * `Script::cancel_burn` /// * `Script::burn` /// * `Script::burn_txn_fees` Preburn { token: TypeTag, amount: u64 }, /// # Summary /// Rotates the authentication key of the sending account to the /// newly-specified public key and publishes a new shared authentication key /// under the sender's account. Any account can send this transaction. /// /// # Technical Description /// Rotates the authentication key of the sending account to `public_key`, /// and publishes a `SharedEd25519PublicKey::SharedEd25519PublicKey` resource /// containing the 32-byte ed25519 `public_key` and the `DiemAccount::KeyRotationCapability` for /// `account` under `account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | The signer reference of the sending account of the transaction. | /// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key for `account`'s authentication key to be rotated to and stored. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability` resource. | /// | `Errors::ALREADY_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | The `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is already published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. | /// /// # Related Scripts /// * `Script::rotate_shared_ed25519_public_key` PublishSharedEd25519PublicKey { public_key: Bytes }, /// # Summary /// Updates a validator's configuration. 
This does not reconfigure the system and will not update /// the configuration in the validator set that is seen by other validators in the network. Can /// only be successfully sent by a Validator Operator account that is already registered with a /// validator. /// /// # Technical Description /// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` /// config resource held under `validator_account`. It does not emit a `DiemConfig::NewEpochEvent` /// so the copy of this config held in the validator set will not be updated, and the changes are /// only "locally" under the `validator_account` account address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. 
| /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::add_validator_and_reconfigure` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` RegisterValidatorConfig { validator_account: AccountAddress, consensus_pubkey: Bytes, validator_network_addresses: Bytes, fullnode_network_addresses: Bytes, }, /// # Summary /// This script removes a validator account from the validator set, and triggers a reconfiguration /// of the system to remove the validator from the system. This transaction can only be /// successfully called by the Diem Root account. /// /// # Technical Description /// This script removes the account at `validator_address` from the validator set. This transaction /// emits a `DiemConfig::NewEpochEvent` event. Once the reconfiguration triggered by this event /// has been performed, the account at `validator_address` is no longer considered to be a /// validator in the network. This transaction will fail if the validator at `validator_address` /// is not in the validator set. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. 
Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be removed from the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::ENOT_AN_ACTIVE_VALIDATOR` | The validator to be removed is not in the validator set. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. 
| /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::add_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` RemoveValidatorAndReconfigure { sliding_nonce: u64, validator_name: Bytes, validator_address: AccountAddress, }, /// # Summary /// Rotates the transaction sender's authentication key to the supplied new authentication key. May /// be sent by any account. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. /// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated /// its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account of the transaction. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `Script::rotate_authentication_key_with_nonce` /// * `Script::rotate_authentication_key_with_nonce_admin` /// * `Script::rotate_authentication_key_with_recovery_address` RotateAuthenticationKey { new_key: Bytes }, /// # Summary /// Rotates the sender's authentication key to the supplied new authentication key. 
May be sent by /// any account that has a sliding nonce resource published under it (usually this is Treasury /// Compliance or Diem Root accounts). /// /// # Technical Description /// Rotates the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. /// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated /// its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account of the transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. 
| /// /// # Related Scripts /// * `Script::rotate_authentication_key` /// * `Script::rotate_authentication_key_with_nonce_admin` /// * `Script::rotate_authentication_key_with_recovery_address` RotateAuthenticationKeyWithNonce { sliding_nonce: u64, new_key: Bytes }, /// # Summary /// Rotates the specified account's authentication key to the supplied new authentication key. May /// only be sent by the Diem Root account as a write set transaction. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. /// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated /// its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `&signer` | Signer reference of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in `dr_account` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `Script::rotate_authentication_key` /// * `Script::rotate_authentication_key_with_nonce` /// * `Script::rotate_authentication_key_with_recovery_address` RotateAuthenticationKeyWithNonceAdmin { sliding_nonce: u64, new_key: Bytes }, /// # Summary /// Rotates the authentication key of a specified account that is part of a recovery address to a /// new authentication key. Only used for accounts that are part of a recovery address (see /// `Script::add_recovery_rotation_capability` for account restrictions). /// /// # Technical Description /// Rotates the authentication key of the `to_recover` account to `new_key` using the /// `DiemAccount::KeyRotationCapability` stored in the `RecoveryAddress::RecoveryAddress` resource /// published under `recovery_address`. This transaction can be sent either by the `to_recover` /// account, or by the account where the `RecoveryAddress::RecoveryAddress` resource is published /// that contains `to_recover`'s `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account of the transaction. | /// | `recovery_address` | `address` | Address where `RecoveryAddress::RecoveryAddress` that holds `to_recover`'s `DiemAccount::KeyRotationCapability` is published. | /// | `to_recover` | `address` | The address of the account whose authentication key will be updated. 
| /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for the account at the `to_recover` address. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress::RecoveryAddress` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ECANNOT_ROTATE_KEY` | The address of `account` is not `recovery_address` or `to_recover`. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EACCOUNT_NOT_RECOVERABLE` | `to_recover`'s `DiemAccount::KeyRotationCapability` is not in the `RecoveryAddress::RecoveryAddress` resource published under `recovery_address`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `Script::rotate_authentication_key` /// * `Script::rotate_authentication_key_with_nonce` /// * `Script::rotate_authentication_key_with_nonce_admin` RotateAuthenticationKeyWithRecoveryAddress { recovery_address: AccountAddress, to_recover: AccountAddress, new_key: Bytes, }, /// # Summary /// Updates the url used for off-chain communication, and the public key used to verify dual /// attestation on-chain. Transaction can be sent by any account that has dual attestation /// information published under it. In practice the only such accounts are Designated Dealers and /// Parent VASPs. /// /// # Technical Description /// Updates the `base_url` and `compliance_public_key` fields of the `DualAttestation::Credential` /// resource published under `account`. The `new_key` must be a valid ed25519 public key. 
/// /// ## Events /// Successful execution of this transaction emits two events: /// * A `DualAttestation::ComplianceKeyRotationEvent` containing the new compliance public key, and /// the blockchain time at which the key was updated emitted on the `DualAttestation::Credential` /// `compliance_key_rotation_events` handle published under `account`; and /// * A `DualAttestation::BaseUrlRotationEvent` containing the new base url to be used for /// off-chain communication, and the blockchain time at which the url was updated emitted on the /// `DualAttestation::Credential` `base_url_rotation_events` handle published under `account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account of the transaction. | /// | `new_url` | `vector<u8>` | ASCII-encoded url to be used for off-chain communication with `account`. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for on-chain dual attestation checking. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `DualAttestation::ECREDENTIAL` | A `DualAttestation::Credential` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_PUBLIC_KEY` | `new_key` is not a valid ed25519 public key. | /// /// # Related Scripts /// * `Script::create_parent_vasp_account` /// * `Script::create_designated_dealer` /// * `Script::rotate_dual_attestation_info` RotateDualAttestationInfo { new_url: Bytes, new_key: Bytes }, /// # Summary /// Rotates the authentication key in a `SharedEd25519PublicKey`. This transaction can be sent by /// any account that has previously published a shared ed25519 public key using /// `Script::publish_shared_ed25519_public_key`. 
/// /// # Technical Description /// This first rotates the public key stored in `account`'s /// `SharedEd25519PublicKey::SharedEd25519PublicKey` resource to `public_key`, after which it /// rotates the authentication key using the capability stored in `account`'s /// `SharedEd25519PublicKey::SharedEd25519PublicKey` to a new value derived from `public_key` /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | The signer reference of the sending account of the transaction. | /// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | A `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. | /// /// # Related Scripts /// * `Script::publish_shared_ed25519_public_key` RotateSharedEd25519PublicKey { public_key: Bytes }, /// # Summary /// Updates a validator's configuration, and triggers a reconfiguration of the system to update the /// validator set with this new validator configuration. Can only be successfully sent by a /// Validator Operator account that is already registered with a validator. /// /// # Technical Description /// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` /// config resource held under `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to /// trigger a reconfiguration of the system. This reconfiguration will update the validator set /// on-chain with the updated `ValidatorConfig::ValidatorConfig`. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. 
| /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::add_validator_and_reconfigure` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::register_validator_config` SetValidatorConfigAndReconfigure { validator_account: AccountAddress, consensus_pubkey: Bytes, validator_network_addresses: Bytes, fullnode_network_addresses: Bytes, }, /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by an account with /// Validator role. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the sending validator account. The account at `operator_account` address must have /// a Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` /// resource published under it. The sending `account` must be a Validator and have a /// `ValidatorConfig::ValidatorConfig` resource published under it. This script does not emit a /// `DiemConfig::NewEpochEvent` and no reconfiguration of the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | The signer reference of the sending account of the transaction. | /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. | /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::add_validator_and_reconfigure` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` SetValidatorOperator { operator_name: Bytes, operator_account: AccountAddress, }, /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by the Diem Root /// account as a write set transaction. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the validator `account`. 
The account at `operator_account` address must have a /// Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource /// published under it. The account represented by the `account` signer must be a Validator and /// have a `ValidatorConfig::ValidatorConfig` resource published under it. No reconfiguration of /// the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `&signer` | Signer reference of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. | /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in` dr_account` has been previously recorded. 
| /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. | /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::add_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_config_and_reconfigure` SetValidatorOperatorWithNonceAdmin { sliding_nonce: u64, operator_name: Bytes, operator_account: AccountAddress, }, /// # Summary /// Mints a specified number of coins in a currency to a Designated Dealer. The sending account /// must be the Treasury Compliance account, and coins can only be minted to a Designated Dealer /// account. /// /// # Technical Description /// Mints `mint_amount` of coins in the `CoinType` currency to Designated Dealer account at /// `designated_dealer_address`. 
The `tier_index` parameter specifies which tier should be used to /// check verify the off-chain approval policy, and is based in part on the on-chain tier values /// for the specific Designated Dealer, and the number of `CoinType` coins that have been minted to /// the dealer over the past 24 hours. Every Designated Dealer has 4 tiers for each currency that /// they support. The sending `tc_account` must be the Treasury Compliance account, and the /// receiver an authorized Designated Dealer account. /// /// ## Events /// Successful execution of the transaction will emit two events: /// * A `Diem::MintEvent` with the amount and currency code minted is emitted on the /// `mint_event_handle` in the stored `Diem::CurrencyInfo<CoinType>` resource stored under /// `0xA550C18`; and /// * A `DesignatedDealer::ReceivedMintEvent` with the amount, currency code, and Designated /// Dealer's address is emitted on the `mint_event_handle` in the stored `DesignatedDealer::Dealer` /// resource published under the `designated_dealer_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being minted. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `designated_dealer_address` | `address` | The address of the Designated Dealer account being minted to. | /// | `mint_amount` | `u64` | The number of coins to be minted. | /// | `tier_index` | `u64` | The mint tier index to use for the Designated Dealer account. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_MINT_AMOUNT` | `mint_amount` is zero. | /// | `Errors::NOT_PUBLISHED` | `DesignatedDealer::EDEALER` | `DesignatedDealer::Dealer` or `DesignatedDealer::TierInfo<CoinType>` resource does not exist at `designated_dealer_address`. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_TIER_INDEX` | The `tier_index` is out of bounds. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_AMOUNT_FOR_TIER` | `mint_amount` exceeds the maximum allowed amount for `tier_index`. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EMINT_CAPABILITY` | `tc_account` does not have a `Diem::MintCapability<CoinType>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EMINTING_NOT_ALLOWED` | Minting is not currently allowed for `CoinType` coins. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds would exceed the `account`'s account limits. 
| /// /// # Related Scripts /// * `Script::create_designated_dealer` /// * `Script::peer_to_peer_with_metadata` /// * `Script::rotate_dual_attestation_info` TieredMint { coin_type: TypeTag, sliding_nonce: u64, designated_dealer_address: AccountAddress, mint_amount: u64, tier_index: u64, }, /// # Summary /// Unfreezes the account at `address`. The sending account of this transaction must be the /// Treasury Compliance account. After the successful execution of this transaction transactions /// may be sent from the previously frozen account, and coins may be sent and received. /// /// # Technical Description /// Sets the `AccountFreezing::FreezingBit` to `false` and emits a /// `AccountFreezing::UnFreezeAccountEvent`. The transaction sender must be the Treasury Compliance /// account. Note that this is a per-account property so unfreezing a Parent VASP will not effect /// the status any of its child accounts and vice versa. /// /// ## Events /// Successful execution of this script will emit a `AccountFreezing::UnFreezeAccountEvent` with /// the `unfrozen_address` set the `to_unfreeze_account`'s address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `to_unfreeze_account` | `address` | The account address to be frozen. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// /// # Related Scripts /// * `Script::freeze_account` UnfreezeAccount { sliding_nonce: u64, to_unfreeze_account: AccountAddress, }, /// # Summary /// Updates the Diem major version that is stored on-chain and is used by the VM. This /// transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the `DiemVersion` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger /// a reconfiguration of the system. The `major` version that is passed in must be strictly greater /// than the current major version held on-chain. The VM reads this information and can use it to /// preserve backwards compatibility with previous major versions of the VM. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `major` | `u64` | The `major` version of the VM to be used from this transaction on. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. | /// | `Errors::INVALID_ARGUMENT` | `DiemVersion::EINVALID_MAJOR_VERSION_NUMBER` | `major` is less-than or equal to the current major version stored on-chain. | UpdateDiemVersion { sliding_nonce: u64, major: u64 }, /// # Summary /// Update the dual attestation limit on-chain. Defined in terms of micro-XDX. The transaction can /// only be sent by the Treasury Compliance account. After this transaction all inter-VASP /// payments over this limit must be checked for dual attestation. /// /// # Technical Description /// Updates the `micro_xdx_limit` field of the `DualAttestation::Limit` resource published under /// `0xA550C18`. The amount is set in micro-XDX. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_micro_xdx_limit` | `u64` | The new dual attestation limit to be used on-chain. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// /// # Related Scripts /// * `Script::update_exchange_rate` /// * `Script::update_minting_ability` UpdateDualAttestationLimit { sliding_nonce: u64, new_micro_xdx_limit: u64, }, /// # Summary /// Update the rough on-chain exchange rate between a specified currency and XDX (as a conversion /// to micro-XDX). The transaction can only be sent by the Treasury Compliance account. After this /// transaction the updated exchange rate will be used for normalization of gas prices, and for /// dual attestation checking. /// /// # Technical Description /// Updates the on-chain exchange rate from the given `Currency` to micro-XDX. The exchange rate /// is given by `new_exchange_rate_numerator/new_exchange_rate_denominator`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose exchange rate is being updated. `Currency` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for the transaction. | /// | `new_exchange_rate_numerator` | `u64` | The numerator for the new to micro-XDX exchange rate for `Currency`. | /// | `new_exchange_rate_denominator` | `u64` | The denominator for the new to micro-XDX exchange rate for `Currency`. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::EDENOMINATOR` | `new_exchange_rate_denominator` is zero. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. | /// | `Errors::LIMIT_EXCEEDED` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. | /// /// # Related Scripts /// * `Script::update_dual_attestation_limit` /// * `Script::update_minting_ability` UpdateExchangeRate { currency: TypeTag, sliding_nonce: u64, new_exchange_rate_numerator: u64, new_exchange_rate_denominator: u64, }, /// # Summary /// Script to allow or disallow minting of new coins in a specified currency. This transaction can /// only be sent by the Treasury Compliance account. Turning minting off for a currency will have /// no effect on coins already in circulation, and coins may still be removed from the system. 
/// /// # Technical Description /// This transaction sets the `can_mint` field of the `Diem::CurrencyInfo<Currency>` resource /// published under `0xA550C18` to the value of `allow_minting`. Minting of coins if allowed if /// this field is set to `true` and minting of new coins in `Currency` is disallowed otherwise. /// This transaction needs to be sent by the Treasury Compliance account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose minting ability is being updated. `Currency` must be an already-registered currency on-chain. | /// | `account` | `&signer` | Signer reference of the sending account. Must be the Diem Root account. | /// | `allow_minting` | `bool` | Whether to allow minting of new coins in `Currency`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | `Currency` is not a registered currency on-chain. | /// /// # Related Scripts /// * `Script::update_dual_attestation_limit` /// * `Script::update_exchange_rate` UpdateMintingAbility { currency: TypeTag, allow_minting: bool, }, } /// Structured representation of a call into a known Move script function. /// ```ignore /// impl ScriptFunctionCall { /// pub fn encode(self) -> TransactionPayload { .. } /// pub fn decode(&TransactionPayload) -> Option<ScriptFunctionCall> { .. } /// } /// ``` #[derive(Clone, Debug, PartialEq, PartialOrd)] #[cfg_attr(feature = "fuzzing", derive(proptest_derive::Arbitrary))] #[cfg_attr(feature = "fuzzing", proptest(no_params))] pub enum ScriptFunctionCall { /// # Summary /// Adds a zero `Currency` balance to the sending `account`. 
This will enable `account` to /// send, receive, and hold `Diem::Diem<Currency>` coins. This transaction can be /// successfully sent by any account that is allowed to hold balances /// (e.g., VASP, Designated Dealer). /// /// # Technical Description /// After the successful execution of this transaction the sending account will have a /// `DiemAccount::Balance<Currency>` resource with zero balance published under it. Only /// accounts that can hold balances can send this transaction, the sending account cannot /// already have a `DiemAccount::Balance<Currency>` published under it. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being added to the sending account of the transaction. `Currency` must be an already-registered currency on-chain. | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EROLE_CANT_STORE_BALANCE` | The sending `account`'s role does not permit balances. | /// | `Errors::ALREADY_PUBLISHED` | `DiemAccount::EADD_EXISTING_CURRENCY` | A balance for `Currency` is already published under the sending `account`. | /// /// # Related Scripts /// * `AccountCreationScripts::create_child_vasp_account` /// * `AccountCreationScripts::create_parent_vasp_account` /// * `PaymentScripts::peer_to_peer_with_metadata` AddCurrencyToAccount { currency: TypeTag, }, /// # Summary /// Add a DiemID domain to parent VASP account. The transaction can only be sent by /// the Treasury Compliance account. 
/// /// # Technical Description /// Adds a `DiemId::DiemIdDomain` to the `domains` field of the `DiemId::DiemIdDomains` resource published under /// the account at `address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `address` | `address` | The `address` of the parent VASP account that will have have `domain` added to its domains. | /// | `domain` | `vector<u8>` | The domain to be added. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN_MANAGER` | The `DiemId::DiemIdDomainManager` resource is not yet published under the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAINS_NOT_PUBLISHED` | `address` does not have a `DiemId::DiemIdDomains` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EDOMAIN_ALREADY_EXISTS` | The `domain` already exists in the list of `DiemId::DiemIdDomain`s in the `DiemId::DiemIdDomains` resource published under `address`. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EINVALID_DIEM_ID_DOMAIN` | The `domain` is greater in length than `DiemId::DOMAIN_LENGTH`. | AddDiemIdDomain { address: AccountAddress, domain: Bytes, }, /// # Summary /// Stores the sending accounts ability to rotate its authentication key with a designated recovery /// account. Both the sending and recovery accounts need to belong to the same VASP and /// both be VASP accounts. 
After this transaction both the sending account and the /// specified recovery account can rotate the sender account's authentication key. /// /// # Technical Description /// Adds the `DiemAccount::KeyRotationCapability` for the sending account /// (`to_recover_account`) to the `RecoveryAddress::RecoveryAddress` resource under /// `recovery_address`. After this transaction has been executed successfully the account at /// `recovery_address` and the `to_recover_account` may rotate the authentication key of /// `to_recover_account` (the sender of this transaction). /// /// The sending account of this transaction (`to_recover_account`) must not have previously given away its unique key /// rotation capability, and must be a VASP account. The account at `recovery_address` /// must also be a VASP account belonging to the same VASP as the `to_recover_account`. /// Additionally the account at `recovery_address` must have already initialized itself as /// a recovery account address using the `AccountAdministrationScripts::create_recovery_address` transaction script. /// /// The sending account's (`to_recover_account`) key rotation capability is /// removed in this transaction and stored in the `RecoveryAddress::RecoveryAddress` /// resource stored under the account at `recovery_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `to_recover_account` | `signer` | The signer of the sending account of this transaction. | /// | `recovery_address` | `address` | The account address where the `to_recover_account`'s `DiemAccount::KeyRotationCapability` will be stored. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `to_recover_account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. 
| /// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EINVALID_KEY_ROTATION_DELEGATION` | `to_recover_account` and `recovery_address` do not belong to the same VASP. | /// | `Errors::LIMIT_EXCEEDED` | ` RecoveryAddress::EMAX_KEYS_REGISTERED` | `RecoveryAddress::MAX_REGISTERED_KEYS` have already been registered with this `recovery_address`. | /// /// # Related Scripts /// * `AccountAdministrationScripts::create_recovery_address` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` AddRecoveryRotationCapability { recovery_address: AccountAddress, }, /// # Summary /// Adds a validator account to the validator set, and triggers a /// reconfiguration of the system to admit the account to the validator set for the system. This /// transaction can only be successfully called by the Diem Root account. /// /// # Technical Description /// This script adds the account at `validator_address` to the validator set. /// This transaction emits a `DiemConfig::NewEpochEvent` event and triggers a /// reconfiguration. Once the reconfiguration triggered by this script's /// execution has been performed, the account at the `validator_address` is /// considered to be a validator in the network. /// /// This transaction script will fail if the `validator_address` address is already in the validator set /// or does not have a `ValidatorConfig::ValidatorConfig` resource already published under it. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. 
Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be added to the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EINVALID_PROSPECTIVE_VALIDATOR` | The validator to be added does not have a `ValidatorConfig::ValidatorConfig` resource published under it, or its `config` field is empty. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EALREADY_A_VALIDATOR` | The `validator_address` account is already a registered validator. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | /// | `Errors::LIMIT_EXCEEDED` | `DiemSystem::EMAX_VALIDATORS` | The validator set is already at its maximum size. The validator could not be added. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` AddValidatorAndReconfigure { sliding_nonce: u64, validator_name: Bytes, validator_address: AccountAddress, }, AutopayCreateInstruction { uid: u64, in_type: u8, payee: AccountAddress, end_epoch: u64, value: u64, }, AutopayDisable {}, AutopayEnable {}, BalanceTransfer { destination: AccountAddress, unscaled_value: u64, }, /// # Summary /// Burns the transaction fees collected in the `CoinType` currency so that the /// Diem association may reclaim the backing coins off-chain. May only be sent /// by the Treasury Compliance account. /// /// # Technical Description /// Burns the transaction fees collected in `CoinType` so that the /// association may reclaim the backing coins. Once this transaction has executed /// successfully all transaction fees that will have been collected in /// `CoinType` since the last time this script was called with that specific /// currency. Both `balance` and `preburn` fields in the /// `TransactionFee::TransactionFee<CoinType>` resource published under the `0xB1E55ED` /// account address will have a value of 0 after the successful execution of this script. /// /// # Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<CoinType>` resource's `burn_events` published under /// `0xA550C18`. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being added to the sending account of the transaction. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `TransactionFee::ETRANSACTION_FEE` | `CoinType` is not an accepted transaction fee currency. | /// | `Errors::INVALID_ARGUMENT` | `Diem::ECOIN` | The collected fees in `CoinType` are zero. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_with_amount` /// * `TreasuryComplianceScripts::cancel_burn_with_amount` BurnTxnFees { coin_type: TypeTag, }, /// # Summary /// Burns the coins held in a preburn resource in the preburn queue at the /// specified preburn address, which are equal to the `amount` specified in the /// transaction. Finds the first relevant outstanding preburn request with /// matching amount and removes the contained coins from the system. The sending /// account must be the Treasury Compliance account. /// The account that holds the preburn queue resource will normally be a Designated /// Dealer, but there are no enforced requirements that it be one. /// /// # Technical Description /// This transaction permanently destroys all the coins of `Token` type /// stored in the `Diem::Preburn<Token>` resource published under the /// `preburn_address` account address. 
/// /// This transaction will only succeed if the sending `account` has a /// `Diem::BurnCapability<Token>`, and a `Diem::Preburn<Token>` resource /// exists under `preburn_address`, with a non-zero `to_burn` field. After the successful execution /// of this transaction the `total_value` field in the /// `Diem::CurrencyInfo<Token>` resource published under `0xA550C18` will be /// decremented by the value of the `to_burn` field of the preburn resource /// under `preburn_address` immediately before this transaction, and the /// `to_burn` field of the preburn resource will have a zero value. /// /// # Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<Token>` resource's `burn_events` published under /// `0xA550C18`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being burned. `Token` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. | /// | `amount` | `u64` | The amount to be burned. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_NOT_FOUND` | The `Diem::PreburnQueue<Token>` resource under `preburn_address` does not contain a preburn request with a value matching `amount`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN_QUEUE` | The account at `preburn_address` does not have a `Diem::PreburnQueue<Token>` resource published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_txn_fees` /// * `TreasuryComplianceScripts::cancel_burn_with_amount` /// * `TreasuryComplianceScripts::preburn` BurnWithAmount { token: TypeTag, sliding_nonce: u64, preburn_address: AccountAddress, amount: u64, }, /// # Summary /// Cancels and returns the coins held in the preburn area under /// `preburn_address`, which are equal to the `amount` specified in the transaction. Finds the first preburn /// resource with the matching amount and returns the funds to the `preburn_address`'s balance. /// Can only be successfully sent by an account with Treasury Compliance role. /// /// # Technical Description /// Cancels and returns all coins held in the `Diem::Preburn<Token>` resource under the `preburn_address` and /// return the funds to the `preburn_address` account's `DiemAccount::Balance<Token>`. /// The transaction must be sent by an `account` with a `Diem::BurnCapability<Token>` /// resource published under it. 
The account at `preburn_address` must have a /// `Diem::Preburn<Token>` resource published under it, and its value must be nonzero. The transaction removes /// the entire balance held in the `Diem::Preburn<Token>` resource, and returns it back to the account's /// `DiemAccount::Balance<Token>` under `preburn_address`. Due to this, the account at /// `preburn_address` must already have a balance in the `Token` currency published /// before this script is called otherwise the transaction will fail. /// /// # Events /// The successful execution of this transaction will emit: /// * A `Diem::CancelBurnEvent` on the event handle held in the `Diem::CurrencyInfo<Token>` /// resource's `burn_events` published under `0xA550C18`. /// * A `DiemAccount::ReceivedPaymentEvent` on the `preburn_address`'s /// `DiemAccount::DiemAccount` `received_events` event handle with both the `payer` and `payee` /// being `preburn_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency that burning is being cancelled for. `Token` must be an already-registered currency on-chain. | /// | `account` | `signer` | The signer of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. | /// | `amount` | `u64` | The amount to be cancelled. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_NOT_FOUND` | The `Diem::PreburnQueue<Token>` resource under `preburn_address` does not contain a preburn request with a value matching `amount`. 
| /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN_QUEUE` | The account at `preburn_address` does not have a `Diem::PreburnQueue<Token>` resource published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | The account at `preburn_address` doesn't have a balance resource for `Token`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds held in the preburn area would exceed the `account`'s account limits. | /// | `Errors::INVALID_STATE` | `DualAttestation::EPAYEE_COMPLIANCE_KEY_NOT_SET` | The `account` does not have a compliance key set on it but dual attestation checking was performed. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_txn_fees` /// * `TreasuryComplianceScripts::burn_with_amount` /// * `TreasuryComplianceScripts::preburn` CancelBurnWithAmount { token: TypeTag, preburn_address: AccountAddress, amount: u64, }, CommunityTransfer { destination: AccountAddress, unscaled_value: u64, memo: Bytes, }, CreateAccUser { challenge: Bytes, solution: Bytes, difficulty: u64, security: u64, }, CreateAccVal { challenge: Bytes, solution: Bytes, difficulty: u64, security: u64, ow_human_name: Bytes, op_address: AccountAddress, op_auth_key_prefix: Bytes, op_consensus_pubkey: Bytes, op_validator_network_addresses: Bytes, op_fullnode_network_addresses: Bytes, op_human_name: Bytes, }, /// # Summary /// Creates a Child VASP account with its parent being the sending account of the transaction. /// The sender of the transaction must be a Parent VASP account. /// /// # Technical Description /// Creates a `ChildVASP` account for the sender `parent_vasp` at `child_address` with a balance of /// `child_initial_balance` in `CoinType` and an initial authentication key of /// `auth_key_prefix | child_address`. 
Authentication key prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// If `add_all_currencies` is true, the child address will have a zero balance in all available /// currencies in the system. /// /// The new account will be a child account of the transaction sender, which must be a /// Parent VASP account. The child account will be recorded against the limit of /// child accounts of the creating Parent VASP account. /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `child_address`, /// and the `rold_id` field being `Roles::CHILD_VASP_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// Successful execution with a `child_initial_balance` greater than zero will additionally emit: /// * A `DiemAccount::SentPaymentEvent` with the `payee` field being `child_address`. /// This is emitted on the Parent VASP's `DiemAccount::DiemAccount` `sent_events` handle. /// * A `DiemAccount::ReceivedPaymentEvent` with the `payer` field being the Parent VASP's address. /// This is emitted on the new Child VASP's `DiemAccount::DiemAccount` `received_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` that the child account should be created with. `CoinType` must be an already-registered currency on-chain. | /// | `parent_vasp` | `signer` | The reference of the sending account. Must be a Parent VASP account. | /// | `child_address` | `address` | Address of the to-be-created Child VASP account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. 
| /// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. | /// | `child_initial_balance` | `u64` | The initial balance in `CoinType` to give the child account when it's created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | The `auth_key_prefix` was not of length 32. | /// | `Errors::REQUIRES_ROLE` | `Roles::EPARENT_VASP` | The sending account wasn't a Parent VASP account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `child_address` address is already taken. | /// | `Errors::LIMIT_EXCEEDED` | `VASP::ETOO_MANY_CHILDREN` | The sending account has reached the maximum number of allowed child accounts. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. | /// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for the sending account has already been extracted. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | The sending account doesn't have a balance in `CoinType`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | The sending account doesn't have at least `child_initial_balance` of `CoinType` balance. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECANNOT_CREATE_AT_VM_RESERVED` | The `child_address` is the reserved address 0x0. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_parent_vasp_account` /// * `AccountAdministrationScripts::add_currency_to_account` /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::add_recovery_rotation_capability` /// * `AccountAdministrationScripts::create_recovery_address` CreateChildVaspAccount { coin_type: TypeTag, child_address: AccountAddress, auth_key_prefix: Bytes, add_all_currencies: bool, child_initial_balance: u64, }, /// # Summary /// Creates a Designated Dealer account with the provided information, and initializes it with /// default mint tiers. The transaction can only be sent by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Designated Dealer role at `addr` with authentication key /// `auth_key_prefix` | `addr` and a 0 balance of type `Currency`. If `add_all_currencies` is true, /// 0 balances for all available currencies in the system will also be added. This can only be /// invoked by an account with the TreasuryCompliance role. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// At the time of creation the account is also initialized with default mint tiers of (500_000, /// 5000_000, 50_000_000, 500_000_000), and preburn areas for each currency that is added to the /// account. /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `addr`, /// and the `rold_id` field being `Roles::DESIGNATED_DEALER_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` that the Designated Dealer should be initialized with. `Currency` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `addr` | `address` | Address of the to-be-created Designated Dealer account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Designated Dealer. | /// | `add_all_currencies` | `bool` | Whether to publish preburn, balance, and tier info resources for all known (SCS) currencies or just `Currency` when the account is created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. 
| /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `addr` address is already taken. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::tiered_mint` /// * `PaymentScripts::peer_to_peer_with_metadata` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` CreateDesignatedDealer { currency: TypeTag, sliding_nonce: u64, addr: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, add_all_currencies: bool, }, /// # Summary /// Publishes a `DiemId::DiemIdDomains` resource under a parent VASP account. /// The sending account must be a parent VASP account. /// /// # Technical Description /// Publishes a `DiemId::DiemIdDomains` resource under `account`. /// The `DiemId::DiemIdDomains` resource's `domains` field is a vector /// of DiemIdDomain, and will be empty at the end of processing this transaction. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::ALREADY_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN` | A `DiemId::DiemIdDomains` resource has already been published under `account`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EPARENT_VASP` | The sending `account` was not a parent VASP account. | CreateDiemIdDomains {}, /// # Summary /// Creates a Parent VASP account with the specified human name. Must be called by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Parent VASP role at `address` with authentication key /// `auth_key_prefix` | `new_account_address` and a 0 balance of type `CoinType`. 
If /// `add_all_currencies` is true, 0 balances for all available currencies in the system will /// also be added. This can only be invoked by a TreasuryCompliance account. /// `sliding_nonce` is a unique nonce for operation, see `SlidingNonce` for details. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`, /// and the `rold_id` field being `Roles::PARENT_VASP_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` currency that the Parent VASP account should be initialized with. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Parent VASP account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Parent VASP. | /// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `AccountCreationScripts::create_child_vasp_account` /// * `AccountAdministrationScripts::add_currency_to_account` /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::add_recovery_rotation_capability` /// * `AccountAdministrationScripts::create_recovery_address` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` CreateParentVaspAccount { coin_type: TypeTag, sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, add_all_currencies: bool, }, /// # Summary /// Initializes the sending account as a recovery address that may be used by /// other accounts belonging to the same VASP as `account`. 
/// The sending account must be a VASP account, and can be either a child or parent VASP account. /// Multiple recovery addresses can exist for a single VASP, but accounts in /// each must be disjoint. /// /// # Technical Description /// Publishes a `RecoveryAddress::RecoveryAddress` resource under `account`. It then /// extracts the `DiemAccount::KeyRotationCapability` for `account` and adds /// it to the resource. After the successful execution of this transaction /// other accounts may add their key rotation to this resource so that `account` /// may be used as a recovery account for those accounts. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ENOT_A_VASP` | `account` is not a VASP account. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EKEY_ROTATION_DEPENDENCY_CYCLE` | A key rotation recovery cycle would be created by adding `account`'s key rotation capability. | /// | `Errors::ALREADY_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | A `RecoveryAddress::RecoveryAddress` resource has already been published under `account`. | /// /// # Related Scripts /// * `Script::add_recovery_rotation_capability` /// * `Script::rotate_authentication_key_with_recovery_address` CreateRecoveryAddress {}, CreateUserByCoinTx { account: AccountAddress, authkey_prefix: Bytes, unscaled_value: u64, }, /// # Summary /// Creates a Validator account. This transaction can only be sent by the Diem /// Root account. 
/// /// # Technical Description /// Creates an account with a Validator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorConfig::ValidatorConfig` resource with empty `config`, and /// `operator_account` fields. The `human_name` field of the /// `ValidatorConfig::ValidatorConfig` is set to the passed in `human_name`. /// This script does not add the validator to the validator set or the system, /// but only creates the account. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`, /// and the `rold_id` field being `Roles::VALIDATOR_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` CreateValidatorAccount { sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, }, /// # Summary /// Creates a Validator Operator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator Operator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorOperatorConfig::ValidatorOperatorConfig` resource with the specified `human_name`. /// This script does not assign the validator operator to any validator accounts but only creates the account. 
/// Authentication key prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`, /// and the `rold_id` field being `Roles::VALIDATOR_OPERATOR_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. 
| /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` CreateValidatorOperatorAccount { sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Bytes, human_name: Bytes, }, DemoE2e { world: u64, }, /// # Summary /// Freezes the account at `address`. The sending account of this transaction /// must be the Treasury Compliance account. The account being frozen cannot be /// the Diem Root or Treasury Compliance account. After the successful /// execution of this transaction no transactions may be sent from the frozen /// account, and the frozen account may not send or receive coins. /// /// # Technical Description /// Sets the `AccountFreezing::FreezingBit` to `true` and emits a /// `AccountFreezing::FreezeAccountEvent`. The transaction sender must be the /// Treasury Compliance account, but the account at `to_freeze_account` must /// not be either `0xA550C18` (the Diem Root address), or `0xB1E55ED` (the /// Treasury Compliance address). Note that this is a per-account property /// e.g., freezing a Parent VASP will not affect the status of any of its child /// accounts and vice versa. 
/// /// # Events /// Successful execution of this transaction will emit a `AccountFreezing::FreezeAccountEvent` on /// the `freeze_event_handle` held in the `AccountFreezing::FreezeEventsHolder` resource published /// under `0xA550C18` with the `frozen_address` being the `to_freeze_account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `to_freeze_account` | `address` | The account address to be frozen. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_TC` | `to_freeze_account` was the Treasury Compliance account (`0xB1E55ED`). | /// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_DIEM_ROOT` | `to_freeze_account` was the Diem Root account (`0xA550C18`). 
| /// /// # Related Scripts /// * `TreasuryComplianceScripts::unfreeze_account` FreezeAccount { sliding_nonce: u64, to_freeze_account: AccountAddress, }, /// # Summary /// Initializes the Diem consensus config that is stored on-chain. This /// transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Initializes the `DiemConsensusConfig` on-chain config to empty and allows future updates from DiemRoot via /// `update_diem_consensus_config`. This doesn't emit a `DiemConfig::NewEpochEvent`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. 
| InitializeDiemConsensusConfig { sliding_nonce: u64, }, Join {}, Leave {}, MinerstateCommit { challenge: Bytes, solution: Bytes, difficulty: u64, security: u64, }, MinerstateCommitByOperator { owner_address: AccountAddress, challenge: Bytes, solution: Bytes, difficulty: u64, security: u64, }, MinerstateHelper {}, /// A validator (Alice) can delegate the authority for the operation of an upgrade to another validator (Bob). When Oracle delegation happens, effectively the consensus voting power of Alice, is added to Bob only for the effect of calculating the preference on electing a stdlib binary. Whatever binary Bob proposes, Alice will also propose without needing to be submitting transactions. OlDelegateVote { dest: AccountAddress, }, /// First Bob must have delegation enabled, which can be done with: OlEnableDelegation {}, OlOracleTx { id: u64, data: Bytes, }, OlReconfigBulkUpdateSetup { alice: AccountAddress, bob: AccountAddress, carol: AccountAddress, sha: AccountAddress, ram: AccountAddress, }, /// Alice can remove Bob as the delegate with this function. OlRemoveDelegation {}, /// # Summary /// Transfers a given number of coins in a specified currency from one account to another. /// Transfers over a specified amount defined on-chain that are between two different VASPs, or /// other accounts that have opted-in will be subject to on-chain checks to ensure the receiver has /// agreed to receive the coins. This transaction can be sent by any account that can hold a /// balance, and to any account that can hold a balance. Both accounts must hold balances in the /// currency being transacted. 
/// /// # Technical Description /// /// Transfers `amount` coins of type `Currency` from `payer` to `payee` with (optional) associated /// `metadata` and an (optional) `metadata_signature` on the message of the form /// `metadata` | `Signer::address_of(payer)` | `amount` | `DualAttestation::DOMAIN_SEPARATOR`, that /// has been signed by the `payee`'s private key associated with the `compliance_public_key` held in /// the `payee`'s `DualAttestation::Credential`. Both the `Signer::address_of(payer)` and `amount` fields /// in the `metadata_signature` must be BCS-encoded bytes, and `|` denotes concatenation. /// The `metadata` and `metadata_signature` parameters are only required if `amount` >= /// `DualAttestation::get_cur_microdiem_limit` XDX and `payer` and `payee` are distinct VASPs. /// However, a transaction sender can opt in to dual attestation even when it is not required /// (e.g., a DesignatedDealer -> VASP payment) by providing a non-empty `metadata_signature`. /// Standardized `metadata` BCS format can be found in `diem_types::transaction::metadata::Metadata`. /// /// # Events /// Successful execution of this script emits two events: /// * A `DiemAccount::SentPaymentEvent` on `payer`'s `DiemAccount::DiemAccount` `sent_events` handle; and /// * A `DiemAccount::ReceivedPaymentEvent` on `payee`'s `DiemAccount::DiemAccount` `received_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being sent in this transaction. `Currency` must be an already-registered currency on-chain. | /// | `payer` | `signer` | The signer of the sending account that coins are being transferred from. | /// | `payee` | `address` | The address of the account the coins are being transferred to. | /// | `metadata` | `vector<u8>` | Optional metadata about this payment. | /// | `metadata_signature` | `vector<u8>` | Optional signature over `metadata` and payment information. 
See |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `payer` doesn't hold a balance in `Currency`. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Currency`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO` | `amount` is zero. |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYEE_DOES_NOT_EXIST` | No account exists at the `payee` address. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | An account exists at `payee`, but it does not accept payments in `Currency`. |
/// | `Errors::INVALID_STATE` | `AccountFreezing::EACCOUNT_FROZEN` | The `payee` account is frozen. |
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EMALFORMED_METADATA_SIGNATURE` | `metadata_signature` is not 64 bytes. |
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_METADATA_SIGNATURE` | `metadata_signature` does not verify against the `payee`'s `DualAttestation::Credential` `compliance_public_key` public key. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EWITHDRAWAL_EXCEEDS_LIMITS` | `payer` has exceeded its daily withdrawal limits for the backing coins of XDX. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | `payee` has exceeded its daily deposit limits for XDX. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_child_vasp_account` /// * `AccountCreationScripts::create_parent_vasp_account` /// * `AccountAdministrationScripts::add_currency_to_account` PeerToPeerWithMetadata { currency: TypeTag, payee: AccountAddress, amount: u64, metadata: Bytes, metadata_signature: Bytes, }, /// # Summary /// Moves a specified number of coins in a given currency from the account's /// balance to its preburn area after which the coins may be burned. This /// transaction may be sent by any account that holds a balance and preburn area /// in the specified currency. /// /// # Technical Description /// Moves the specified `amount` of coins in `Token` currency from the sending `account`'s /// `DiemAccount::Balance<Token>` to the `Diem::Preburn<Token>` published under the same /// `account`. `account` must have both of these resources published under it at the start of this /// transaction in order for it to execute successfully. /// /// # Events /// Successful execution of this script emits two events: /// * `DiemAccount::SentPaymentEvent ` on `account`'s `DiemAccount::DiemAccount` `sent_events` /// handle with the `payee` and `payer` fields being `account`'s address; and /// * A `Diem::PreburnEvent` with `Token`'s currency code on the /// `Diem::CurrencyInfo<Token`'s `preburn_events` handle for `Token` and with /// `preburn_address` set to `account`'s address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being moved to the preburn area. `Token` must be an already-registered currency on-chain. | /// | `account` | `signer` | The signer of the sending account. | /// | `amount` | `u64` | The amount in `Token` to be moved to the preburn area. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for `account` has already been extracted. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `account` doesn't hold a balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | `account` doesn't have a `Diem::Preburn<Token>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_OCCUPIED` | The `value` field in the `Diem::Preburn<Token>` resource under the sender is non-zero. | /// | `Errors::NOT_PUBLISHED` | `Roles::EROLE_ID` | The `account` did not have a role assigned to it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDESIGNATED_DEALER` | The `account` did not have the role of DesignatedDealer. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::cancel_burn_with_amount` /// * `TreasuryComplianceScripts::burn_with_amount` /// * `TreasuryComplianceScripts::burn_txn_fees` Preburn { token: TypeTag, amount: u64, }, /// # Summary /// Rotates the authentication key of the sending account to the newly-specified ed25519 public key and /// publishes a new shared authentication key derived from that public key under the sender's account. /// Any account can send this transaction. 
/// /// # Technical Description /// Rotates the authentication key of the sending account to the /// [authentication key derived from `public_key`](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys) /// and publishes a `SharedEd25519PublicKey::SharedEd25519PublicKey` resource /// containing the 32-byte ed25519 `public_key` and the `DiemAccount::KeyRotationCapability` for /// `account` under `account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// | `public_key` | `vector<u8>` | A valid 32-byte Ed25519 public key for `account`'s authentication key to be rotated to and stored. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability` resource. | /// | `Errors::ALREADY_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | The `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is already published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_shared_ed25519_public_key` PublishSharedEd25519PublicKey { public_key: Bytes, }, /// # Summary /// Updates a validator's configuration. This does not reconfigure the system and will not update /// the configuration in the validator set that is seen by other validators in the network. Can /// only be successfully sent by a Validator Operator account that is already registered with a /// validator. 
/// /// # Technical Description /// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` /// config resource held under `validator_account`. It does not emit a `DiemConfig::NewEpochEvent` /// so the copy of this config held in the validator set will not be updated, and the changes are /// only "locally" under the `validator_account` account address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `signer` | Signer of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` RegisterValidatorConfig { validator_account: AccountAddress, consensus_pubkey: Bytes, validator_network_addresses: Bytes, fullnode_network_addresses: Bytes, }, /// # Summary /// Remove a DiemID domain from parent VASP account. The transaction can only be sent by /// the Treasury Compliance account. /// /// # Technical Description /// Removes a `DiemId::DiemIdDomain` from the `domains` field of the `DiemId::DiemIdDomains` resource published under /// account with `address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `address` | `address` | The `address` of parent VASP account that will update its domains. | /// | `domain` | `vector<u8>` | The domain name. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN_MANAGER` | The `DiemId::DiemIdDomainManager` resource is not yet published under the Treasury Compliance account. 
| /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAINS_NOT_PUBLISHED` | `address` does not have a `DiemId::DiemIdDomains` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EINVALID_DIEM_ID_DOMAIN` | The `domain` is greater in length than `DiemId::DOMAIN_LENGTH`. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EDOMAIN_NOT_FOUND` | The `domain` does not exist in the list of `DiemId::DiemIdDomain`s in the `DiemId::DiemIdDomains` resource published under `address`. | RemoveDiemIdDomain { address: AccountAddress, domain: Bytes, }, /// # Summary /// This script removes a validator account from the validator set, and triggers a reconfiguration /// of the system to remove the validator from the system. This transaction can only be /// successfully called by the Diem Root account. /// /// # Technical Description /// This script removes the account at `validator_address` from the validator set. This transaction /// emits a `DiemConfig::NewEpochEvent` event. Once the reconfiguration triggered by this event /// has been performed, the account at `validator_address` is no longer considered to be a /// validator in the network. This transaction will fail if the validator at `validator_address` /// is not in the validator set. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be removed from the validator set. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::ENOT_AN_ACTIVE_VALIDATOR` | The validator to be removed is not in the validator set. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` RemoveValidatorAndReconfigure { sliding_nonce: u64, validator_name: Bytes, validator_address: AccountAddress, }, /// # Summary /// Rotates the `account`'s authentication key to the supplied new authentication key. May be sent by any account. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` /// field to `new_key`. `new_key` must be a valid authentication key that /// corresponds to an ed25519 public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. 
| /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` RotateAuthenticationKey { new_key: Bytes, }, /// # Summary /// Rotates the sender's authentication key to the supplied new authentication key. May be sent by /// any account that has a sliding nonce resource published under it (usually this is Treasury /// Compliance or Diem Root accounts). /// /// # Technical Description /// Rotates the `account`'s `DiemAccount::DiemAccount` `authentication_key` /// field to `new_key`. `new_key` must be a valid authentication key that /// corresponds to an ed25519 public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` RotateAuthenticationKeyWithNonce { sliding_nonce: u64, new_key: Bytes, }, /// # Summary /// Rotates the specified account's authentication key to the supplied new authentication key. May /// only be sent by the Diem Root account as a write set transaction. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. /// `new_key` must be a valid authentication key that corresponds to an ed25519 /// public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `signer` | Signer of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in` dr_account` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` RotateAuthenticationKeyWithNonceAdmin { sliding_nonce: u64, new_key: Bytes, }, /// # Summary /// Rotates the authentication key of a specified account that is part of a recovery address to a /// new authentication key. Only used for accounts that are part of a recovery address (see /// `AccountAdministrationScripts::add_recovery_rotation_capability` for account restrictions). /// /// # Technical Description /// Rotates the authentication key of the `to_recover` account to `new_key` using the /// `DiemAccount::KeyRotationCapability` stored in the `RecoveryAddress::RecoveryAddress` resource /// published under `recovery_address`. 
`new_key` must be a valid authentication key as described
/// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys).
/// This transaction can be sent either by the `to_recover` account, or by the account where the
/// `RecoveryAddress::RecoveryAddress` resource is published that contains `to_recover`'s `DiemAccount::KeyRotationCapability`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `signer` | Signer of the sending account of the transaction. |
/// | `recovery_address` | `address` | Address where `RecoveryAddress::RecoveryAddress` that holds `to_recover`'s `DiemAccount::KeyRotationCapability` is published. |
/// | `to_recover` | `address` | The address of the account whose authentication key will be updated. |
/// | `new_key` | `vector<u8>` | New authentication key to be used for the account at the `to_recover` address. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress::RecoveryAddress` resource published under it. |
/// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ECANNOT_ROTATE_KEY` | The address of `account` is not `recovery_address` or `to_recover`. |
/// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EACCOUNT_NOT_RECOVERABLE` | `to_recover`'s `DiemAccount::KeyRotationCapability` is not in the `RecoveryAddress::RecoveryAddress` resource published under `recovery_address`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. 
| /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` RotateAuthenticationKeyWithRecoveryAddress { recovery_address: AccountAddress, to_recover: AccountAddress, new_key: Bytes, }, /// # Summary /// Updates the url used for off-chain communication, and the public key used to verify dual /// attestation on-chain. Transaction can be sent by any account that has dual attestation /// information published under it. In practice the only such accounts are Designated Dealers and /// Parent VASPs. /// /// # Technical Description /// Updates the `base_url` and `compliance_public_key` fields of the `DualAttestation::Credential` /// resource published under `account`. The `new_key` must be a valid ed25519 public key. /// /// # Events /// Successful execution of this transaction emits two events: /// * A `DualAttestation::ComplianceKeyRotationEvent` containing the new compliance public key, and /// the blockchain time at which the key was updated emitted on the `DualAttestation::Credential` /// `compliance_key_rotation_events` handle published under `account`; and /// * A `DualAttestation::BaseUrlRotationEvent` containing the new base url to be used for /// off-chain communication, and the blockchain time at which the url was updated emitted on the /// `DualAttestation::Credential` `base_url_rotation_events` handle published under `account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `new_url` | `vector<u8>` | ASCII-encoded url to be used for off-chain communication with `account`. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for on-chain dual attestation checking. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `DualAttestation::ECREDENTIAL` | A `DualAttestation::Credential` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_PUBLIC_KEY` | `new_key` is not a valid ed25519 public key. | /// /// # Related Scripts /// * `AccountCreationScripts::create_parent_vasp_account` /// * `AccountCreationScripts::create_designated_dealer` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` RotateDualAttestationInfo { new_url: Bytes, new_key: Bytes, }, /// # Summary /// Rotates the authentication key in a `SharedEd25519PublicKey`. This transaction can be sent by /// any account that has previously published a shared ed25519 public key using /// `AccountAdministrationScripts::publish_shared_ed25519_public_key`. /// /// # Technical Description /// `public_key` must be a valid ed25519 public key. This transaction first rotates the public key stored in `account`'s /// `SharedEd25519PublicKey::SharedEd25519PublicKey` resource to `public_key`, after which it /// rotates the `account`'s authentication key to the new authentication key derived from `public_key` as defined /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys) /// using the `DiemAccount::KeyRotationCapability` stored in `account`'s `SharedEd25519PublicKey::SharedEd25519PublicKey`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | A `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. | /// /// # Related Scripts /// * `AccountAdministrationScripts::publish_shared_ed25519_public_key` RotateSharedEd25519PublicKey { public_key: Bytes, }, /// # Summary /// Updates the gas constants stored on chain and used by the VM for gas /// metering. This transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the on-chain config holding the `DiemVMConfig` and emits a /// `DiemConfig::NewEpochEvent` to trigger a reconfiguration of the system. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `global_memory_per_byte_cost` | `u64` | The new cost to read global memory per-byte to be used for gas metering. | /// | `global_memory_per_byte_write_cost` | `u64` | The new cost to write global memory per-byte to be used for gas metering. | /// | `min_transaction_gas_units` | `u64` | The new flat minimum amount of gas required for any transaction. | /// | `large_transaction_cutoff` | `u64` | The new size over which an additional charge will be assessed for each additional byte. | /// | `intrinsic_gas_per_byte` | `u64` | The new number of units of gas that to be charged per-byte over the new `large_transaction_cutoff`. | /// | `maximum_number_of_gas_units` | `u64` | The new maximum number of gas units that can be set in a transaction. 
| /// | `min_price_per_gas_unit` | `u64` | The new minimum gas price that can be set for a transaction. | /// | `max_price_per_gas_unit` | `u64` | The new maximum gas price that can be set for a transaction. | /// | `max_transaction_size_in_bytes` | `u64` | The new maximum size of a transaction that can be processed. | /// | `gas_unit_scaling_factor` | `u64` | The new scaling factor to use when scaling between external and internal gas units. | /// | `default_account_size` | `u64` | The new default account size to use when assessing final costs for reads and writes to global storage. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_ARGUMENT` | `DiemVMConfig::EGAS_CONSTANT_INCONSISTENCY` | The provided gas constants are inconsistent. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. 
| SetGasConstants { sliding_nonce: u64, global_memory_per_byte_cost: u64, global_memory_per_byte_write_cost: u64, min_transaction_gas_units: u64, large_transaction_cutoff: u64, intrinsic_gas_per_byte: u64, maximum_number_of_gas_units: u64, min_price_per_gas_unit: u64, max_price_per_gas_unit: u64, max_transaction_size_in_bytes: u64, gas_unit_scaling_factor: u64, default_account_size: u64, }, /// # Summary /// Updates a validator's configuration, and triggers a reconfiguration of the system to update the /// validator set with this new validator configuration. Can only be successfully sent by a /// Validator Operator account that is already registered with a validator. /// /// # Technical Description /// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` /// config resource held under `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to /// trigger a reconfiguration of the system. This reconfiguration will update the validator set /// on-chain with the updated `ValidatorConfig::ValidatorConfig`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `signer` | Signer of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::register_validator_config` SetValidatorConfigAndReconfigure { validator_account: AccountAddress, consensus_pubkey: Bytes, validator_network_addresses: Bytes, fullnode_network_addresses: Bytes, }, /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by an account with /// Validator role. 
/// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the sending validator account. The account at `operator_account` address must have /// a Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` /// resource published under it. The sending `account` must be a Validator and have a /// `ValidatorConfig::ValidatorConfig` resource published under it. This script does not emit a /// `DiemConfig::NewEpochEvent` and no reconfiguration of the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` SetValidatorOperator { operator_name: Bytes, operator_account: AccountAddress, }, /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by the Diem Root /// account as a write set transaction. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the validator `account`. The account at `operator_account` address must have a /// Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource /// published under it. The account represented by the `account` signer must be a Validator and /// have a `ValidatorConfig::ValidatorConfig` resource published under it. No reconfiguration of /// the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | Signer of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `signer` | Signer of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. 
| /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in` dr_account` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` SetValidatorOperatorWithNonceAdmin { sliding_nonce: u64, operator_name: Bytes, operator_account: AccountAddress, }, SetWalletType { type_of: u8, }, /// # Summary /// Mints a specified number of coins in a currency to a Designated Dealer. The sending account /// must be the Treasury Compliance account, and coins can only be minted to a Designated Dealer /// account. /// /// # Technical Description /// Mints `mint_amount` of coins in the `CoinType` currency to Designated Dealer account at /// `designated_dealer_address`. The `tier_index` parameter specifies which tier should be used to /// check verify the off-chain approval policy, and is based in part on the on-chain tier values /// for the specific Designated Dealer, and the number of `CoinType` coins that have been minted to /// the dealer over the past 24 hours. Every Designated Dealer has 4 tiers for each currency that /// they support. The sending `tc_account` must be the Treasury Compliance account, and the /// receiver an authorized Designated Dealer account. 
/// /// # Events /// Successful execution of the transaction will emit two events: /// * A `Diem::MintEvent` with the amount and currency code minted is emitted on the /// `mint_event_handle` in the stored `Diem::CurrencyInfo<CoinType>` resource stored under /// `0xA550C18`; and /// * A `DesignatedDealer::ReceivedMintEvent` with the amount, currency code, and Designated /// Dealer's address is emitted on the `mint_event_handle` in the stored `DesignatedDealer::Dealer` /// resource published under the `designated_dealer_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being minted. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `designated_dealer_address` | `address` | The address of the Designated Dealer account being minted to. | /// | `mint_amount` | `u64` | The number of coins to be minted. | /// | `tier_index` | `u64` | [Deprecated] The mint tier index to use for the Designated Dealer account. Will be ignored | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. 
| /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_MINT_AMOUNT` | `mint_amount` is zero. | /// | `Errors::NOT_PUBLISHED` | `DesignatedDealer::EDEALER` | `DesignatedDealer::Dealer` or `DesignatedDealer::TierInfo<CoinType>` resource does not exist at `designated_dealer_address`. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EMINT_CAPABILITY` | `tc_account` does not have a `Diem::MintCapability<CoinType>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EMINTING_NOT_ALLOWED` | Minting is not currently allowed for `CoinType` coins. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds would exceed the `account`'s account limits. | /// /// # Related Scripts /// * `AccountCreationScripts::create_designated_dealer` /// * `PaymentScripts::peer_to_peer_with_metadata` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` TieredMint { coin_type: TypeTag, sliding_nonce: u64, designated_dealer_address: AccountAddress, mint_amount: u64, tier_index: u64, }, /// # Summary /// Unfreezes the account at `address`. The sending account of this transaction must be the /// Treasury Compliance account. After the successful execution of this transaction transactions /// may be sent from the previously frozen account, and coins may be sent and received. /// /// # Technical Description /// Sets the `AccountFreezing::FreezingBit` to `false` and emits a /// `AccountFreezing::UnFreezeAccountEvent`. The transaction sender must be the Treasury Compliance /// account. Note that this is a per-account property so unfreezing a Parent VASP will not effect /// the status any of its child accounts and vice versa. 
/// /// # Events /// Successful execution of this script will emit a `AccountFreezing::UnFreezeAccountEvent` with /// the `unfrozen_address` set to the `to_unfreeze_account`'s address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `to_unfreeze_account` | `address` | The account address to be unfrozen. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::freeze_account` UnfreezeAccount { sliding_nonce: u64, to_unfreeze_account: AccountAddress, }, /// # Summary /// Updates the Diem consensus config that is stored on-chain and is used by the Consensus. This /// transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the `DiemConsensusConfig` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger /// a reconfiguration of the system. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `config` | `vector<u8>` | The serialized bytes of consensus config. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. | UpdateDiemConsensusConfig { sliding_nonce: u64, config: Bytes, }, /// # Summary /// Updates the Diem major version that is stored on-chain and is used by the VM. This /// transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the `DiemVersion` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger /// a reconfiguration of the system. The `major` version that is passed in must be strictly greater /// than the current major version held on-chain. The VM reads this information and can use it to /// preserve backwards compatibility with previous major versions of the VM. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. 
| /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `major` | `u64` | The `major` version of the VM to be used from this transaction on. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. | /// | `Errors::INVALID_ARGUMENT` | `DiemVersion::EINVALID_MAJOR_VERSION_NUMBER` | `major` is less-than or equal to the current major version stored on-chain. | UpdateDiemVersion { sliding_nonce: u64, major: u64, }, /// # Summary /// Update the dual attestation limit on-chain. Defined in terms of micro-XDX. The transaction can /// only be sent by the Treasury Compliance account. After this transaction all inter-VASP /// payments over this limit must be checked for dual attestation. /// /// # Technical Description /// Updates the `micro_xdx_limit` field of the `DualAttestation::Limit` resource published under /// `0xA550C18`. The amount is set in micro-XDX. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. 
| /// | `new_micro_xdx_limit` | `u64` | The new dual attestation limit to be used on-chain. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::update_exchange_rate` /// * `TreasuryComplianceScripts::update_minting_ability` UpdateDualAttestationLimit { sliding_nonce: u64, new_micro_xdx_limit: u64, }, /// # Summary /// Update the rough on-chain exchange rate between a specified currency and XDX (as a conversion /// to micro-XDX). The transaction can only be sent by the Treasury Compliance account. After this /// transaction the updated exchange rate will be used for normalization of gas prices, and for /// dual attestation checking. /// /// # Technical Description /// Updates the on-chain exchange rate from the given `Currency` to micro-XDX. The exchange rate /// is given by `new_exchange_rate_numerator/new_exchange_rate_denominator`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose exchange rate is being updated. `Currency` must be an already-registered currency on-chain. 
| /// | `dm_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for the transaction. | /// | `new_exchange_rate_numerator` | `u64` | The numerator for the new to micro-XDX exchange rate for `Currency`. | /// | `new_exchange_rate_denominator` | `u64` | The denominator for the new to micro-XDX exchange rate for `Currency`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dm_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `dm_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `dm_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::EDENOMINATOR` | `new_exchange_rate_denominator` is zero. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. | /// | `Errors::LIMIT_EXCEEDED` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. 
| /// /// # Related Scripts /// * `TreasuryComplianceScripts::update_dual_attestation_limit` /// * `TreasuryComplianceScripts::update_minting_ability` UpdateExchangeRate { currency: TypeTag, sliding_nonce: u64, new_exchange_rate_numerator: u64, new_exchange_rate_denominator: u64, }, /// # Summary /// Script to allow or disallow minting of new coins in a specified currency. This transaction can /// only be sent by the Treasury Compliance account. Turning minting off for a currency will have /// no effect on coins already in circulation, and coins may still be removed from the system. /// /// # Technical Description /// This transaction sets the `can_mint` field of the `Diem::CurrencyInfo<Currency>` resource /// published under `0xA550C18` to the value of `allow_minting`. Minting of coins if allowed if /// this field is set to `true` and minting of new coins in `Currency` is disallowed otherwise. /// This transaction needs to be sent by the Treasury Compliance account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose minting ability is being updated. `Currency` must be an already-registered currency on-chain. | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. | /// | `allow_minting` | `bool` | Whether to allow minting of new coins in `Currency`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | `Currency` is not a registered currency on-chain. 
// NOTE(review): the lines below are whitespace-mangled generated code — original
// newlines were lost, so doc-comment continuation text (leading "| ///" fragment)
// no longer carries its `///` prefix. Code tokens are left byte-identical here;
// this span covers the tail of the `ScriptCall` enum (whose declaration starts
// above this chunk) and `impl ScriptCall`'s `encode`/`decode`, which dispatch each
// variant to its legacy `encode_*_script` builder / registered decoder map.
| /// /// # Related Scripts /// * `TreasuryComplianceScripts::update_dual_attestation_limit` /// * `TreasuryComplianceScripts::update_exchange_rate` UpdateMintingAbility { currency: TypeTag, allow_minting: bool, }, ValAddSelf {}, } impl ScriptCall { /// Build a Diem `Script` from a structured object `ScriptCall`. pub fn encode(self) -> Script { use ScriptCall::*; match self { AddCurrencyToAccount { currency } => encode_add_currency_to_account_script(currency), AddRecoveryRotationCapability { recovery_address } => { encode_add_recovery_rotation_capability_script(recovery_address) } AddValidatorAndReconfigure { sliding_nonce, validator_name, validator_address, } => encode_add_validator_and_reconfigure_script( sliding_nonce, validator_name, validator_address, ), Burn { token, sliding_nonce, preburn_address, } => encode_burn_script(token, sliding_nonce, preburn_address), BurnTxnFees { coin_type } => encode_burn_txn_fees_script(coin_type), CancelBurn { token, preburn_address, } => encode_cancel_burn_script(token, preburn_address), CreateChildVaspAccount { coin_type, child_address, auth_key_prefix, add_all_currencies, child_initial_balance, } => encode_create_child_vasp_account_script( coin_type, child_address, auth_key_prefix, add_all_currencies, child_initial_balance, ), CreateDesignatedDealer { currency, sliding_nonce, addr, auth_key_prefix, human_name, add_all_currencies, } => encode_create_designated_dealer_script( currency, sliding_nonce, addr, auth_key_prefix, human_name, add_all_currencies, ), CreateParentVaspAccount { coin_type, sliding_nonce, new_account_address, auth_key_prefix, human_name, add_all_currencies, } => encode_create_parent_vasp_account_script( coin_type, sliding_nonce, new_account_address, auth_key_prefix, human_name, add_all_currencies, ), CreateRecoveryAddress {} => encode_create_recovery_address_script(), CreateValidatorAccount { sliding_nonce, new_account_address, auth_key_prefix, human_name, } => encode_create_validator_account_script(
sliding_nonce, new_account_address, auth_key_prefix, human_name, ), CreateValidatorOperatorAccount { sliding_nonce, new_account_address, auth_key_prefix, human_name, } => encode_create_validator_operator_account_script( sliding_nonce, new_account_address, auth_key_prefix, human_name, ), FreezeAccount { sliding_nonce, to_freeze_account, } => encode_freeze_account_script(sliding_nonce, to_freeze_account), PeerToPeerWithMetadata { currency, payee, amount, metadata, metadata_signature, } => encode_peer_to_peer_with_metadata_script( currency, payee, amount, metadata, metadata_signature, ), Preburn { token, amount } => encode_preburn_script(token, amount), PublishSharedEd25519PublicKey { public_key } => { encode_publish_shared_ed25519_public_key_script(public_key) } RegisterValidatorConfig { validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, } => encode_register_validator_config_script( validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, ), RemoveValidatorAndReconfigure { sliding_nonce, validator_name, validator_address, } => encode_remove_validator_and_reconfigure_script( sliding_nonce, validator_name, validator_address, ), RotateAuthenticationKey { new_key } => encode_rotate_authentication_key_script(new_key), RotateAuthenticationKeyWithNonce { sliding_nonce, new_key, } => encode_rotate_authentication_key_with_nonce_script(sliding_nonce, new_key), RotateAuthenticationKeyWithNonceAdmin { sliding_nonce, new_key, } => encode_rotate_authentication_key_with_nonce_admin_script(sliding_nonce, new_key), RotateAuthenticationKeyWithRecoveryAddress { recovery_address, to_recover, new_key, } => encode_rotate_authentication_key_with_recovery_address_script( recovery_address, to_recover, new_key, ), RotateDualAttestationInfo { new_url, new_key } => { encode_rotate_dual_attestation_info_script(new_url, new_key) } RotateSharedEd25519PublicKey { public_key } => {
encode_rotate_shared_ed25519_public_key_script(public_key) } SetValidatorConfigAndReconfigure { validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, } => encode_set_validator_config_and_reconfigure_script( validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, ), SetValidatorOperator { operator_name, operator_account, } => encode_set_validator_operator_script(operator_name, operator_account), SetValidatorOperatorWithNonceAdmin { sliding_nonce, operator_name, operator_account, } => encode_set_validator_operator_with_nonce_admin_script( sliding_nonce, operator_name, operator_account, ), TieredMint { coin_type, sliding_nonce, designated_dealer_address, mint_amount, tier_index, } => encode_tiered_mint_script( coin_type, sliding_nonce, designated_dealer_address, mint_amount, tier_index, ), UnfreezeAccount { sliding_nonce, to_unfreeze_account, } => encode_unfreeze_account_script(sliding_nonce, to_unfreeze_account), UpdateDiemVersion { sliding_nonce, major, } => encode_update_diem_version_script(sliding_nonce, major), UpdateDualAttestationLimit { sliding_nonce, new_micro_xdx_limit, } => encode_update_dual_attestation_limit_script(sliding_nonce, new_micro_xdx_limit), UpdateExchangeRate { currency, sliding_nonce, new_exchange_rate_numerator, new_exchange_rate_denominator, } => encode_update_exchange_rate_script( currency, sliding_nonce, new_exchange_rate_numerator, new_exchange_rate_denominator, ), UpdateMintingAbility { currency, allow_minting, } => encode_update_minting_ability_script(currency, allow_minting), } } /// Try to recognize a Diem `Script` and convert it into a structured object `ScriptCall`. pub fn decode(script: &Script) -> Option<ScriptCall> { match TRANSACTION_SCRIPT_DECODER_MAP.get(script.code()) { Some(decoder) => decoder(script), None => None, } } /// Return the name of a Diem `Script` from a structured object `ScriptCall`.
// NOTE(review): whitespace-mangled generated code, left byte-identical. This span
// holds `ScriptCall::name` (maps every variant to its snake_case legacy-script name
// string), the close of `impl ScriptCall`, and the start of `impl ScriptFunctionCall`
// whose `encode` builds a `TransactionPayload` by dispatching each variant to its
// generated `encode_*_script_function` builder. The final match arm here is cut
// mid-expression by the line wrapping and continues on the next source line.
pub fn name(&self) -> &'static str { use ScriptCall::*; match self { AddCurrencyToAccount { .. } => "add_currency_to_account", AddRecoveryRotationCapability { .. } => "add_recovery_rotation_capability", AddValidatorAndReconfigure { .. } => "add_validator_and_reconfigure", Burn { .. } => "burn", BurnTxnFees { .. } => "burn_txn_fees", CancelBurn { .. } => "cancel_burn", CreateChildVaspAccount { .. } => "create_child_vasp_account", CreateDesignatedDealer { .. } => "create_designated_dealer", CreateParentVaspAccount { .. } => "create_parent_vasp_account", CreateRecoveryAddress { .. } => "create_recovery_address", CreateValidatorAccount { .. } => "create_validator_account", CreateValidatorOperatorAccount { .. } => "create_validator_operator_account", FreezeAccount { .. } => "freeze_account", PeerToPeerWithMetadata { .. } => "peer_to_peer_with_metadata", Preburn { .. } => "preburn", PublishSharedEd25519PublicKey { .. } => "publish_shared_ed25519_public_key", RegisterValidatorConfig { .. } => "register_validator_config", RemoveValidatorAndReconfigure { .. } => "remove_validator_and_reconfigure", RotateAuthenticationKey { .. } => "rotate_authentication_key", RotateAuthenticationKeyWithNonce { .. } => "rotate_authentication_key_with_nonce", RotateAuthenticationKeyWithNonceAdmin { .. } => { "rotate_authentication_key_with_nonce_admin" } RotateAuthenticationKeyWithRecoveryAddress { .. } => { "rotate_authentication_key_with_recovery_address" } RotateDualAttestationInfo { .. } => "rotate_dual_attestation_info", RotateSharedEd25519PublicKey { .. } => "rotate_shared_ed25519_public_key", SetValidatorConfigAndReconfigure { .. } => "set_validator_config_and_reconfigure", SetValidatorOperator { .. } => "set_validator_operator", SetValidatorOperatorWithNonceAdmin { .. } => "set_validator_operator_with_nonce_admin", TieredMint { .. } => "tiered_mint", UnfreezeAccount { .. } => "unfreeze_account", UpdateDiemVersion { .. } => "update_diem_version", UpdateDualAttestationLimit { ..
} => "update_dual_attestation_limit", UpdateExchangeRate { .. } => "update_exchange_rate", UpdateMintingAbility { .. } => "update_minting_ability", } } } impl ScriptFunctionCall { /// Build a Diem `TransactionPayload` from a structured object `ScriptFunctionCall`. pub fn encode(self) -> TransactionPayload { use ScriptFunctionCall::*; match self { AddCurrencyToAccount { currency } => { encode_add_currency_to_account_script_function(currency) } AddDiemIdDomain { address, domain } => { encode_add_diem_id_domain_script_function(address, domain) } AddRecoveryRotationCapability { recovery_address } => { encode_add_recovery_rotation_capability_script_function(recovery_address) } AddValidatorAndReconfigure { sliding_nonce, validator_name, validator_address, } => encode_add_validator_and_reconfigure_script_function( sliding_nonce, validator_name, validator_address, ), AutopayCreateInstruction { uid, in_type, payee, end_epoch, value, } => encode_autopay_create_instruction_script_function( uid, in_type, payee, end_epoch, value, ), AutopayDisable {} => encode_autopay_disable_script_function(), AutopayEnable {} => encode_autopay_enable_script_function(), BalanceTransfer { destination, unscaled_value, } => encode_balance_transfer_script_function(destination, unscaled_value), BurnTxnFees { coin_type } => encode_burn_txn_fees_script_function(coin_type), BurnWithAmount { token, sliding_nonce, preburn_address, amount, } => encode_burn_with_amount_script_function( token, sliding_nonce, preburn_address, amount, ), CancelBurnWithAmount { token, preburn_address, amount, } => encode_cancel_burn_with_amount_script_function(token, preburn_address, amount), CommunityTransfer { destination, unscaled_value, memo, } => encode_community_transfer_script_function(destination, unscaled_value, memo), CreateAccUser { challenge, solution, difficulty, security, } => encode_create_acc_user_script_function(challenge, solution, difficulty, security), CreateAccVal { challenge, solution, difficulty,
security, ow_human_name, op_address, op_auth_key_prefix, op_consensus_pubkey, op_validator_network_addresses, op_fullnode_network_addresses, op_human_name, } => encode_create_acc_val_script_function( challenge, solution, difficulty, security, ow_human_name, op_address, op_auth_key_prefix, op_consensus_pubkey, op_validator_network_addresses, op_fullnode_network_addresses, op_human_name, ), CreateChildVaspAccount { coin_type, child_address, auth_key_prefix, add_all_currencies, child_initial_balance, } => encode_create_child_vasp_account_script_function( coin_type, child_address, auth_key_prefix, add_all_currencies, child_initial_balance, ), CreateDesignatedDealer { currency, sliding_nonce, addr, auth_key_prefix, human_name, add_all_currencies, } => encode_create_designated_dealer_script_function( currency, sliding_nonce, addr, auth_key_prefix, human_name, add_all_currencies, ), CreateDiemIdDomains {} => encode_create_diem_id_domains_script_function(), CreateParentVaspAccount { coin_type, sliding_nonce, new_account_address, auth_key_prefix, human_name, add_all_currencies, } => encode_create_parent_vasp_account_script_function( coin_type, sliding_nonce, new_account_address, auth_key_prefix, human_name, add_all_currencies, ), CreateRecoveryAddress {} => encode_create_recovery_address_script_function(), CreateUserByCoinTx { account, authkey_prefix, unscaled_value, } => encode_create_user_by_coin_tx_script_function( account, authkey_prefix, unscaled_value, ), CreateValidatorAccount { sliding_nonce, new_account_address, auth_key_prefix, human_name, } => encode_create_validator_account_script_function( sliding_nonce, new_account_address, auth_key_prefix, human_name, ), CreateValidatorOperatorAccount { sliding_nonce, new_account_address, auth_key_prefix, human_name, } => encode_create_validator_operator_account_script_function( sliding_nonce, new_account_address, auth_key_prefix, human_name, ), DemoE2e { world } => encode_demo_e2e_script_function(world), FreezeAccount {
sliding_nonce, to_freeze_account, } => encode_freeze_account_script_function(sliding_nonce, to_freeze_account), InitializeDiemConsensusConfig { sliding_nonce } => { encode_initialize_diem_consensus_config_script_function(sliding_nonce) } Join {} => encode_join_script_function(), Leave {} => encode_leave_script_function(), MinerstateCommit { challenge, solution, difficulty, security, } => { encode_minerstate_commit_script_function(challenge, solution, difficulty, security) } MinerstateCommitByOperator { owner_address, challenge, solution, difficulty, security, } => encode_minerstate_commit_by_operator_script_function( owner_address, challenge, solution, difficulty, security, ), MinerstateHelper {} => encode_minerstate_helper_script_function(), OlDelegateVote { dest } => encode_ol_delegate_vote_script_function(dest), OlEnableDelegation {} => encode_ol_enable_delegation_script_function(), OlOracleTx { id, data } => encode_ol_oracle_tx_script_function(id, data), OlReconfigBulkUpdateSetup { alice, bob, carol, sha, ram, } => encode_ol_reconfig_bulk_update_setup_script_function(alice, bob, carol, sha, ram), OlRemoveDelegation {} => encode_ol_remove_delegation_script_function(), PeerToPeerWithMetadata { currency, payee, amount, metadata, metadata_signature, } => encode_peer_to_peer_with_metadata_script_function( currency, payee, amount, metadata, metadata_signature, ), Preburn { token, amount } => encode_preburn_script_function(token, amount), PublishSharedEd25519PublicKey { public_key } => { encode_publish_shared_ed25519_public_key_script_function(public_key) } RegisterValidatorConfig { validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, } => encode_register_validator_config_script_function( validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, ), RemoveDiemIdDomain { address, domain } => { encode_remove_diem_id_domain_script_function(address, domain) } RemoveValidatorAndReconfigure {
sliding_nonce, validator_name, validator_address, } => encode_remove_validator_and_reconfigure_script_function( sliding_nonce, validator_name, validator_address, ), RotateAuthenticationKey { new_key } => { encode_rotate_authentication_key_script_function(new_key) } RotateAuthenticationKeyWithNonce { sliding_nonce, new_key, } => { encode_rotate_authentication_key_with_nonce_script_function(sliding_nonce, new_key) } RotateAuthenticationKeyWithNonceAdmin { sliding_nonce, new_key, } => encode_rotate_authentication_key_with_nonce_admin_script_function( sliding_nonce, new_key, ), RotateAuthenticationKeyWithRecoveryAddress { recovery_address, to_recover, new_key, } => encode_rotate_authentication_key_with_recovery_address_script_function( recovery_address, to_recover, new_key, ), RotateDualAttestationInfo { new_url, new_key } => { encode_rotate_dual_attestation_info_script_function(new_url, new_key) } RotateSharedEd25519PublicKey { public_key } => { encode_rotate_shared_ed25519_public_key_script_function(public_key) } SetGasConstants { sliding_nonce, global_memory_per_byte_cost, global_memory_per_byte_write_cost, min_transaction_gas_units, large_transaction_cutoff, intrinsic_gas_per_byte, maximum_number_of_gas_units, min_price_per_gas_unit, max_price_per_gas_unit, max_transaction_size_in_bytes, gas_unit_scaling_factor, default_account_size, } => encode_set_gas_constants_script_function( sliding_nonce, global_memory_per_byte_cost, global_memory_per_byte_write_cost, min_transaction_gas_units, large_transaction_cutoff, intrinsic_gas_per_byte, maximum_number_of_gas_units, min_price_per_gas_unit, max_price_per_gas_unit, max_transaction_size_in_bytes, gas_unit_scaling_factor, default_account_size, ), SetValidatorConfigAndReconfigure { validator_account, consensus_pubkey, validator_network_addresses, fullnode_network_addresses, } => encode_set_validator_config_and_reconfigure_script_function( validator_account, consensus_pubkey, validator_network_addresses,
fullnode_network_addresses, ), SetValidatorOperator { operator_name, operator_account, } => encode_set_validator_operator_script_function(operator_name, operator_account), SetValidatorOperatorWithNonceAdmin { sliding_nonce, operator_name, operator_account, } => encode_set_validator_operator_with_nonce_admin_script_function( sliding_nonce, operator_name, operator_account, ), SetWalletType { type_of } => encode_set_wallet_type_script_function(type_of), TieredMint { coin_type, sliding_nonce, designated_dealer_address, mint_amount, tier_index, } => encode_tiered_mint_script_function( coin_type, sliding_nonce, designated_dealer_address, mint_amount, tier_index, ), UnfreezeAccount { sliding_nonce, to_unfreeze_account, } => encode_unfreeze_account_script_function(sliding_nonce, to_unfreeze_account), UpdateDiemConsensusConfig { sliding_nonce, config, } => encode_update_diem_consensus_config_script_function(sliding_nonce, config), UpdateDiemVersion { sliding_nonce, major, } => encode_update_diem_version_script_function(sliding_nonce, major), UpdateDualAttestationLimit { sliding_nonce, new_micro_xdx_limit, } => encode_update_dual_attestation_limit_script_function( sliding_nonce, new_micro_xdx_limit, ), UpdateExchangeRate { currency, sliding_nonce, new_exchange_rate_numerator, new_exchange_rate_denominator, } => encode_update_exchange_rate_script_function( currency, sliding_nonce, new_exchange_rate_numerator, new_exchange_rate_denominator, ), UpdateMintingAbility { currency, allow_minting, } => encode_update_minting_ability_script_function(currency, allow_minting), ValAddSelf {} => encode_val_add_self_script_function(), } } /// Try to recognize a Diem `TransactionPayload` and convert it into a structured object `ScriptFunctionCall`.
pub fn decode(payload: &TransactionPayload) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { match SCRIPT_FUNCTION_DECODER_MAP.get(&format!( "{}{}", script.module().name(), script.function() )) { Some(decoder) => decoder(payload), None => None, } } else { None } } } /// # Summary /// Adds a zero `Currency` balance to the sending `account`. This will enable `account` to /// send, receive, and hold `Diem::Diem<Currency>` coins. This transaction can be /// successfully sent by any account that is allowed to hold balances /// (e.g., VASP, Designated Dealer). /// /// # Technical Description /// After the successful execution of this transaction the sending account will have a /// `DiemAccount::Balance<Currency>` resource with zero balance published under it. Only /// accounts that can hold balances can send this transaction, the sending account cannot /// already have a `DiemAccount::Balance<Currency>` published under it. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being added to the sending account of the transaction. `Currency` must be an already-registered currency on-chain. | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EROLE_CANT_STORE_BALANCE` | The sending `account`'s role does not permit balances. | /// | `Errors::ALREADY_PUBLISHED` | `DiemAccount::EADD_EXISTING_CURRENCY` | A balance for `Currency` is already published under the sending `account`. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_child_vasp_account` /// * `AccountCreationScripts::create_parent_vasp_account` /// * `PaymentScripts::peer_to_peer_with_metadata` pub fn encode_add_currency_to_account_script_function(currency: TypeTag) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("add_currency_to_account").to_owned(), vec![currency], vec![], )) } /// # Summary /// Add a DiemID domain to parent VASP account. The transaction can only be sent by /// the Treasury Compliance account. /// /// # Technical Description /// Adds a `DiemId::DiemIdDomain` to the `domains` field of the `DiemId::DiemIdDomains` resource published under /// the account at `address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `address` | `address` | The `address` of the parent VASP account that will have have `domain` added to its domains. | /// | `domain` | `vector<u8>` | The domain to be added. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN_MANAGER` | The `DiemId::DiemIdDomainManager` resource is not yet published under the Treasury Compliance account. 
| /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAINS_NOT_PUBLISHED` | `address` does not have a `DiemId::DiemIdDomains` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EDOMAIN_ALREADY_EXISTS` | The `domain` already exists in the list of `DiemId::DiemIdDomain`s in the `DiemId::DiemIdDomains` resource published under `address`. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EINVALID_DIEM_ID_DOMAIN` | The `domain` is greater in length than `DiemId::DOMAIN_LENGTH`. | pub fn encode_add_diem_id_domain_script_function( address: AccountAddress, domain: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TreasuryComplianceScripts").to_owned(), ), ident_str!("add_diem_id_domain").to_owned(), vec![], vec![ bcs::to_bytes(&address).unwrap(), bcs::to_bytes(&domain).unwrap(), ], )) } /// # Summary /// Stores the sending accounts ability to rotate its authentication key with a designated recovery /// account. Both the sending and recovery accounts need to belong to the same VASP and /// both be VASP accounts. After this transaction both the sending account and the /// specified recovery account can rotate the sender account's authentication key. /// /// # Technical Description /// Adds the `DiemAccount::KeyRotationCapability` for the sending account /// (`to_recover_account`) to the `RecoveryAddress::RecoveryAddress` resource under /// `recovery_address`. After this transaction has been executed successfully the account at /// `recovery_address` and the `to_recover_account` may rotate the authentication key of /// `to_recover_account` (the sender of this transaction). /// /// The sending account of this transaction (`to_recover_account`) must not have previously given away its unique key /// rotation capability, and must be a VASP account. 
The account at `recovery_address` /// must also be a VASP account belonging to the same VASP as the `to_recover_account`. /// Additionally the account at `recovery_address` must have already initialized itself as /// a recovery account address using the `AccountAdministrationScripts::create_recovery_address` transaction script. /// /// The sending account's (`to_recover_account`) key rotation capability is /// removed in this transaction and stored in the `RecoveryAddress::RecoveryAddress` /// resource stored under the account at `recovery_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `to_recover_account` | `signer` | The signer of the sending account of this transaction. | /// | `recovery_address` | `address` | The account address where the `to_recover_account`'s `DiemAccount::KeyRotationCapability` will be stored. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `to_recover_account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EINVALID_KEY_ROTATION_DELEGATION` | `to_recover_account` and `recovery_address` do not belong to the same VASP. | /// | `Errors::LIMIT_EXCEEDED` | ` RecoveryAddress::EMAX_KEYS_REGISTERED` | `RecoveryAddress::MAX_REGISTERED_KEYS` have already been registered with this `recovery_address`. 
| /// /// # Related Scripts /// * `AccountAdministrationScripts::create_recovery_address` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` pub fn encode_add_recovery_rotation_capability_script_function( recovery_address: AccountAddress, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("add_recovery_rotation_capability").to_owned(), vec![], vec![bcs::to_bytes(&recovery_address).unwrap()], )) } /// # Summary /// Adds a validator account to the validator set, and triggers a /// reconfiguration of the system to admit the account to the validator set for the system. This /// transaction can only be successfully called by the Diem Root account. /// /// # Technical Description /// This script adds the account at `validator_address` to the validator set. /// This transaction emits a `DiemConfig::NewEpochEvent` event and triggers a /// reconfiguration. Once the reconfiguration triggered by this script's /// execution has been performed, the account at the `validator_address` is /// considered to be a validator in the network. /// /// This transaction script will fail if the `validator_address` address is already in the validator set /// or does not have a `ValidatorConfig::ValidatorConfig` resource already published under it. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. 
| /// | `validator_address` | `address` | The validator account address to be added to the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EINVALID_PROSPECTIVE_VALIDATOR` | The validator to be added does not have a `ValidatorConfig::ValidatorConfig` resource published under it, or its `config` field is empty. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EALREADY_A_VALIDATOR` | The `validator_address` account is already a registered validator. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | /// | `Errors::LIMIT_EXCEEDED` | `DiemSystem::EMAX_VALIDATORS` | The validator set is already at its maximum size. The validator could not be added. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` pub fn encode_add_validator_and_reconfigure_script_function( sliding_nonce: u64, validator_name: Vec<u8>, validator_address: AccountAddress, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorAdministrationScripts").to_owned(), ), ident_str!("add_validator_and_reconfigure").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&validator_name).unwrap(), bcs::to_bytes(&validator_address).unwrap(), ], )) } pub fn encode_autopay_create_instruction_script_function( uid: u64, in_type: u8, payee: AccountAddress, end_epoch: u64, value: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AutoPayScripts").to_owned(), ), ident_str!("autopay_create_instruction").to_owned(), vec![], vec![ bcs::to_bytes(&uid).unwrap(), bcs::to_bytes(&in_type).unwrap(), bcs::to_bytes(&payee).unwrap(), bcs::to_bytes(&end_epoch).unwrap(), bcs::to_bytes(&value).unwrap(), ], )) } pub fn encode_autopay_disable_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AutoPayScripts").to_owned(), ), ident_str!("autopay_disable").to_owned(), vec![], vec![], )) } pub fn 
encode_autopay_enable_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AutoPayScripts").to_owned(), ), ident_str!("autopay_enable").to_owned(), vec![], vec![], )) } pub fn encode_balance_transfer_script_function( destination: AccountAddress, unscaled_value: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TransferScripts").to_owned(), ), ident_str!("balance_transfer").to_owned(), vec![], vec![ bcs::to_bytes(&destination).unwrap(), bcs::to_bytes(&unscaled_value).unwrap(), ], )) } /// # Summary /// Burns the transaction fees collected in the `CoinType` currency so that the /// Diem association may reclaim the backing coins off-chain. May only be sent /// by the Treasury Compliance account. /// /// # Technical Description /// Burns the transaction fees collected in `CoinType` so that the /// association may reclaim the backing coins. Once this transaction has executed /// successfully all transaction fees that will have been collected in /// `CoinType` since the last time this script was called with that specific /// currency. Both `balance` and `preburn` fields in the /// `TransactionFee::TransactionFee<CoinType>` resource published under the `0xB1E55ED` /// account address will have a value of 0 after the successful execution of this script. /// /// # Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<CoinType>` resource's `burn_events` published under /// `0xA550C18`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being added to the sending account of the transaction. 
`CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `TransactionFee::ETRANSACTION_FEE` | `CoinType` is not an accepted transaction fee currency. | /// | `Errors::INVALID_ARGUMENT` | `Diem::ECOIN` | The collected fees in `CoinType` are zero. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_with_amount` /// * `TreasuryComplianceScripts::cancel_burn_with_amount` pub fn encode_burn_txn_fees_script_function(coin_type: TypeTag) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TreasuryComplianceScripts").to_owned(), ), ident_str!("burn_txn_fees").to_owned(), vec![coin_type], vec![], )) } /// # Summary /// Burns the coins held in a preburn resource in the preburn queue at the /// specified preburn address, which are equal to the `amount` specified in the /// transaction. Finds the first relevant outstanding preburn request with /// matching amount and removes the contained coins from the system. The sending /// account must be the Treasury Compliance account. /// The account that holds the preburn queue resource will normally be a Designated /// Dealer, but there are no enforced requirements that it be one. /// /// # Technical Description /// This transaction permanently destroys all the coins of `Token` type /// stored in the `Diem::Preburn<Token>` resource published under the /// `preburn_address` account address. 
/// /// This transaction will only succeed if the sending `account` has a /// `Diem::BurnCapability<Token>`, and a `Diem::Preburn<Token>` resource /// exists under `preburn_address`, with a non-zero `to_burn` field. After the successful execution /// of this transaction the `total_value` field in the /// `Diem::CurrencyInfo<Token>` resource published under `0xA550C18` will be /// decremented by the value of the `to_burn` field of the preburn resource /// under `preburn_address` immediately before this transaction, and the /// `to_burn` field of the preburn resource will have a zero value. /// /// # Events /// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle /// held in the `Diem::CurrencyInfo<Token>` resource's `burn_events` published under /// `0xA550C18`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being burned. `Token` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. | /// | `amount` | `u64` | The amount to be burned. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_NOT_FOUND` | The `Diem::PreburnQueue<Token>` resource under `preburn_address` does not contain a preburn request with a value matching `amount`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN_QUEUE` | The account at `preburn_address` does not have a `Diem::PreburnQueue<Token>` resource published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_txn_fees` /// * `TreasuryComplianceScripts::cancel_burn_with_amount` /// * `TreasuryComplianceScripts::preburn` pub fn encode_burn_with_amount_script_function( token: TypeTag, sliding_nonce: u64, preburn_address: AccountAddress, amount: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TreasuryComplianceScripts").to_owned(), ), ident_str!("burn_with_amount").to_owned(), vec![token], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&preburn_address).unwrap(), bcs::to_bytes(&amount).unwrap(), ], )) } /// # Summary /// Cancels and returns the coins held in the preburn area under /// `preburn_address`, which are equal to the `amount` specified in the transaction. Finds the first preburn /// resource with the matching amount and returns the funds to the `preburn_address`'s balance. /// Can only be successfully sent by an account with Treasury Compliance role. 
/// /// # Technical Description /// Cancels and returns all coins held in the `Diem::Preburn<Token>` resource under the `preburn_address` and /// return the funds to the `preburn_address` account's `DiemAccount::Balance<Token>`. /// The transaction must be sent by an `account` with a `Diem::BurnCapability<Token>` /// resource published under it. The account at `preburn_address` must have a /// `Diem::Preburn<Token>` resource published under it, and its value must be nonzero. The transaction removes /// the entire balance held in the `Diem::Preburn<Token>` resource, and returns it back to the account's /// `DiemAccount::Balance<Token>` under `preburn_address`. Due to this, the account at /// `preburn_address` must already have a balance in the `Token` currency published /// before this script is called otherwise the transaction will fail. /// /// # Events /// The successful execution of this transaction will emit: /// * A `Diem::CancelBurnEvent` on the event handle held in the `Diem::CurrencyInfo<Token>` /// resource's `burn_events` published under `0xA550C18`. /// * A `DiemAccount::ReceivedPaymentEvent` on the `preburn_address`'s /// `DiemAccount::DiemAccount` `received_events` event handle with both the `payer` and `payee` /// being `preburn_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currenty that burning is being cancelled for. `Token` must be an already-registered currency on-chain. | /// | `account` | `signer` | The signer of the sending account of this transaction, must have a burn capability for `Token` published under it. | /// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held. | /// | `amount` | `u64` | The amount to be cancelled. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY` | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_NOT_FOUND` | The `Diem::PreburnQueue<Token>` resource under `preburn_address` does not contain a preburn request with a value matching `amount`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN_QUEUE` | The account at `preburn_address` does not have a `Diem::PreburnQueue<Token>` resource published under it. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The specified `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | The account at `preburn_address` doesn't have a balance resource for `Token`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds held in the prebun area would exceed the `account`'s account limits. | /// | `Errors::INVALID_STATE` | `DualAttestation::EPAYEE_COMPLIANCE_KEY_NOT_SET` | The `account` does not have a compliance key set on it but dual attestion checking was performed. 
| /// /// # Related Scripts /// * `TreasuryComplianceScripts::burn_txn_fees` /// * `TreasuryComplianceScripts::burn_with_amount` /// * `TreasuryComplianceScripts::preburn` pub fn encode_cancel_burn_with_amount_script_function( token: TypeTag, preburn_address: AccountAddress, amount: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TreasuryComplianceScripts").to_owned(), ), ident_str!("cancel_burn_with_amount").to_owned(), vec![token], vec![ bcs::to_bytes(&preburn_address).unwrap(), bcs::to_bytes(&amount).unwrap(), ], )) } pub fn encode_community_transfer_script_function( destination: AccountAddress, unscaled_value: u64, memo: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TransferScripts").to_owned(), ), ident_str!("community_transfer").to_owned(), vec![], vec![ bcs::to_bytes(&destination).unwrap(), bcs::to_bytes(&unscaled_value).unwrap(), bcs::to_bytes(&memo).unwrap(), ], )) } pub fn encode_create_acc_user_script_function( challenge: Vec<u8>, solution: Vec<u8>, difficulty: u64, security: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountScripts").to_owned(), ), ident_str!("create_acc_user").to_owned(), vec![], vec![ bcs::to_bytes(&challenge).unwrap(), bcs::to_bytes(&solution).unwrap(), bcs::to_bytes(&difficulty).unwrap(), bcs::to_bytes(&security).unwrap(), ], )) } pub fn encode_create_acc_val_script_function( challenge: Vec<u8>, solution: Vec<u8>, difficulty: u64, security: u64, ow_human_name: Vec<u8>, op_address: AccountAddress, op_auth_key_prefix: Vec<u8>, op_consensus_pubkey: Vec<u8>, op_validator_network_addresses: Vec<u8>, 
op_fullnode_network_addresses: Vec<u8>, op_human_name: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountScripts").to_owned(), ), ident_str!("create_acc_val").to_owned(), vec![], vec![ bcs::to_bytes(&challenge).unwrap(), bcs::to_bytes(&solution).unwrap(), bcs::to_bytes(&difficulty).unwrap(), bcs::to_bytes(&security).unwrap(), bcs::to_bytes(&ow_human_name).unwrap(), bcs::to_bytes(&op_address).unwrap(), bcs::to_bytes(&op_auth_key_prefix).unwrap(), bcs::to_bytes(&op_consensus_pubkey).unwrap(), bcs::to_bytes(&op_validator_network_addresses).unwrap(), bcs::to_bytes(&op_fullnode_network_addresses).unwrap(), bcs::to_bytes(&op_human_name).unwrap(), ], )) } /// # Summary /// Creates a Child VASP account with its parent being the sending account of the transaction. /// The sender of the transaction must be a Parent VASP account. /// /// # Technical Description /// Creates a `ChildVASP` account for the sender `parent_vasp` at `child_address` with a balance of /// `child_initial_balance` in `CoinType` and an initial authentication key of /// `auth_key_prefix | child_address`. Authentication key prefixes, and how to construct them from an ed25519 public key is described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// If `add_all_currencies` is true, the child address will have a zero balance in all available /// currencies in the system. /// /// The new account will be a child account of the transaction sender, which must be a /// Parent VASP account. The child account will be recorded against the limit of /// child accounts of the creating Parent VASP account. 
///
/// # Events
/// Successful execution will emit:
/// * A `DiemAccount::CreateAccountEvent` with the `created` field being `child_address`,
/// and the `role_id` field being `Roles::CHILD_VASP_ROLE_ID`. This is emitted on the
/// `DiemAccount::AccountOperationsCapability` `creation_events` handle.
///
/// Successful execution with a `child_initial_balance` greater than zero will additionally emit:
/// * A `DiemAccount::SentPaymentEvent` with the `payee` field being `child_address`.
/// This is emitted on the Parent VASP's `DiemAccount::DiemAccount` `sent_events` handle.
/// * A `DiemAccount::ReceivedPaymentEvent` with the `payer` field being the Parent VASP's address.
/// This is emitted on the new Child VASP's `DiemAccount::DiemAccount` `received_events` handle.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `CoinType` | Type | The Move type for the `CoinType` that the child account should be created with. `CoinType` must be an already-registered currency on-chain. |
/// | `parent_vasp` | `signer` | The reference of the sending account. Must be a Parent VASP account. |
/// | `child_address` | `address` | Address of the to-be-created Child VASP account. |
/// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. |
/// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. |
/// | `child_initial_balance` | `u64` | The initial balance in `CoinType` to give the child account when it's created. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | The `auth_key_prefix` was not of length 32. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EPARENT_VASP` | The sending account wasn't a Parent VASP account. |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `child_address` address is already taken. |
/// | `Errors::LIMIT_EXCEEDED` | `VASP::ETOO_MANY_CHILDREN` | The sending account has reached the maximum number of allowed child accounts. |
/// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. |
/// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for the sending account has already been extracted. |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | The sending account doesn't have a balance in `CoinType`. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | The sending account doesn't have at least `child_initial_balance` of `CoinType` balance. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECANNOT_CREATE_AT_VM_RESERVED` | The `child_address` is the reserved address 0x0. |
///
/// # Related Scripts
/// * `AccountCreationScripts::create_parent_vasp_account`
/// * `AccountAdministrationScripts::add_currency_to_account`
/// * `AccountAdministrationScripts::rotate_authentication_key`
/// * `AccountAdministrationScripts::add_recovery_rotation_capability`
/// * `AccountAdministrationScripts::create_recovery_address`
pub fn encode_create_child_vasp_account_script_function(
    coin_type: TypeTag,
    child_address: AccountAddress,
    auth_key_prefix: Vec<u8>,
    add_all_currencies: bool,
    child_initial_balance: u64,
) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("AccountCreationScripts").to_owned(),
    );
    // BCS-serialize the value arguments in declaration order.
    let args = vec![
        bcs::to_bytes(&child_address).unwrap(),
        bcs::to_bytes(&auth_key_prefix).unwrap(),
        bcs::to_bytes(&add_all_currencies).unwrap(),
        bcs::to_bytes(&child_initial_balance).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("create_child_vasp_account").to_owned(),
        vec![coin_type],
        args,
    ))
}

/// # Summary
/// Creates
a Designated Dealer account with the provided information, and initializes it with /// default mint tiers. The transaction can only be sent by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Designated Dealer role at `addr` with authentication key /// `auth_key_prefix` | `addr` and a 0 balance of type `Currency`. If `add_all_currencies` is true, /// 0 balances for all available currencies in the system will also be added. This can only be /// invoked by an account with the TreasuryCompliance role. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// At the time of creation the account is also initialized with default mint tiers of (500_000, /// 5000_000, 50_000_000, 500_000_000), and preburn areas for each currency that is added to the /// account. /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `addr`, /// and the `rold_id` field being `Roles::DESIGNATED_DEALER_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` that the Designated Dealer should be initialized with. `Currency` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `addr` | `address` | Address of the to-be-created Designated Dealer account. 
| /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Designated Dealer. | /// | `add_all_currencies` | `bool` | Whether to publish preburn, balance, and tier info resources for all known (SCS) currencies or just `Currency` when the account is created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `addr` address is already taken. 
| /// /// # Related Scripts /// * `TreasuryComplianceScripts::tiered_mint` /// * `PaymentScripts::peer_to_peer_with_metadata` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` pub fn encode_create_designated_dealer_script_function( currency: TypeTag, sliding_nonce: u64, addr: AccountAddress, auth_key_prefix: Vec<u8>, human_name: Vec<u8>, add_all_currencies: bool, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountCreationScripts").to_owned(), ), ident_str!("create_designated_dealer").to_owned(), vec![currency], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&addr).unwrap(), bcs::to_bytes(&auth_key_prefix).unwrap(), bcs::to_bytes(&human_name).unwrap(), bcs::to_bytes(&add_all_currencies).unwrap(), ], )) } /// # Summary /// Publishes a `DiemId::DiemIdDomains` resource under a parent VASP account. /// The sending account must be a parent VASP account. /// /// # Technical Description /// Publishes a `DiemId::DiemIdDomains` resource under `account`. /// The The `DiemId::DiemIdDomains` resource's `domains` field is a vector /// of DiemIdDomain, and will be empty on at the end of processing this transaction. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::ALREADY_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN` | A `DiemId::DiemIdDomains` resource has already been published under `account`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EPARENT_VASP` | The sending `account` was not a parent VASP account. 
| pub fn encode_create_diem_id_domains_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("create_diem_id_domains").to_owned(), vec![], vec![], )) } /// # Summary /// Creates a Parent VASP account with the specified human name. Must be called by the Treasury Compliance account. /// /// # Technical Description /// Creates an account with the Parent VASP role at `address` with authentication key /// `auth_key_prefix` | `new_account_address` and a 0 balance of type `CoinType`. If /// `add_all_currencies` is true, 0 balances for all available currencies in the system will /// also be added. This can only be invoked by an TreasuryCompliance account. /// `sliding_nonce` is a unique nonce for operation, see `SlidingNonce` for details. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`, /// and the `rold_id` field being `Roles::PARENT_VASP_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` currency that the Parent VASP account should be initialized with. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. 
| /// | `new_account_address` | `address` | Address of the to-be-created Parent VASP account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the Parent VASP. | /// | `add_all_currencies` | `bool` | Whether to publish balance resources for all known currencies when the account is created. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `CoinType` is not a registered currency on-chain. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_child_vasp_account` /// * `AccountAdministrationScripts::add_currency_to_account` /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::add_recovery_rotation_capability` /// * `AccountAdministrationScripts::create_recovery_address` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` pub fn encode_create_parent_vasp_account_script_function( coin_type: TypeTag, sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Vec<u8>, human_name: Vec<u8>, add_all_currencies: bool, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountCreationScripts").to_owned(), ), ident_str!("create_parent_vasp_account").to_owned(), vec![coin_type], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&new_account_address).unwrap(), bcs::to_bytes(&auth_key_prefix).unwrap(), bcs::to_bytes(&human_name).unwrap(), bcs::to_bytes(&add_all_currencies).unwrap(), ], )) } /// # Summary /// Initializes the sending account as a recovery address that may be used by /// other accounts belonging to the same VASP as `account`. /// The sending account must be a VASP account, and can be either a child or parent VASP account. /// Multiple recovery addresses can exist for a single VASP, but accounts in /// each must be disjoint. /// /// # Technical Description /// Publishes a `RecoveryAddress::RecoveryAddress` resource under `account`. It then /// extracts the `DiemAccount::KeyRotationCapability` for `account` and adds /// it to the resource. After the successful execution of this transaction /// other accounts may add their key rotation to this resource so that `account` /// may be used as a recovery account for those accounts. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ENOT_A_VASP` | `account` is not a VASP account. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EKEY_ROTATION_DEPENDENCY_CYCLE` | A key rotation recovery cycle would be created by adding `account`'s key rotation capability. | /// | `Errors::ALREADY_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | A `RecoveryAddress::RecoveryAddress` resource has already been published under `account`. | /// /// # Related Scripts /// * `Script::add_recovery_rotation_capability` /// * `Script::rotate_authentication_key_with_recovery_address` pub fn encode_create_recovery_address_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("create_recovery_address").to_owned(), vec![], vec![], )) } pub fn encode_create_user_by_coin_tx_script_function( account: AccountAddress, authkey_prefix: Vec<u8>, unscaled_value: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountScripts").to_owned(), ), ident_str!("create_user_by_coin_tx").to_owned(), vec![], vec![ bcs::to_bytes(&account).unwrap(), bcs::to_bytes(&authkey_prefix).unwrap(), bcs::to_bytes(&unscaled_value).unwrap(), ], )) } /// # Summary /// Creates a 
Validator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorConfig::ValidatorConfig` resource with empty `config`, and /// `operator_account` fields. The `human_name` field of the /// `ValidatorConfig::ValidatorConfig` is set to the passed in `human_name`. /// This script does not add the validator to the validator set or the system, /// but only creates the account. /// Authentication keys, prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// /// # Events /// Successful execution will emit: /// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`, /// and the `rold_id` field being `Roles::VALIDATOR_ROLE_ID`. This is emitted on the /// `DiemAccount::AccountOperationsCapability` `creation_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` pub fn encode_create_validator_account_script_function( sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Vec<u8>, human_name: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountCreationScripts").to_owned(), ), ident_str!("create_validator_account").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&new_account_address).unwrap(), bcs::to_bytes(&auth_key_prefix).unwrap(), bcs::to_bytes(&human_name).unwrap(), ], )) } /// # Summary /// Creates a Validator Operator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator Operator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorOperatorConfig::ValidatorOperatorConfig` resource with the specified `human_name`. /// This script does not assign the validator operator to any validator accounts but only creates the account. /// Authentication key prefixes, and how to construct them from an ed25519 public key are described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). 
///
/// # Events
/// Successful execution will emit:
/// * A `DiemAccount::CreateAccountEvent` with the `created` field being `new_account_address`,
/// and the `role_id` field being `Roles::VALIDATOR_OPERATOR_ROLE_ID`. This is emitted on the
/// `DiemAccount::AccountOperationsCapability` `creation_events` handle.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `new_account_address` | `address` | Address of the to-be-created Validator account. |
/// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. |
/// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken.
|
///
/// # Related Scripts
/// * `AccountCreationScripts::create_validator_account`
/// * `ValidatorAdministrationScripts::add_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::register_validator_config`
/// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::set_validator_operator`
/// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin`
/// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure`
pub fn encode_create_validator_operator_account_script_function(
    sliding_nonce: u64,
    new_account_address: AccountAddress,
    auth_key_prefix: Vec<u8>,
    human_name: Vec<u8>,
) -> TransactionPayload {
    // Same module as validator-account creation: `AccountCreationScripts` at 0x1.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("AccountCreationScripts").to_owned(),
    );
    let args = vec![
        bcs::to_bytes(&sliding_nonce).unwrap(),
        bcs::to_bytes(&new_account_address).unwrap(),
        bcs::to_bytes(&auth_key_prefix).unwrap(),
        bcs::to_bytes(&human_name).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("create_validator_operator_account").to_owned(),
        vec![], // no type arguments
        args,
    ))
}

/// Builds the payload invoking the `DemoScripts::demo_e2e` end-to-end demo entry function.
pub fn encode_demo_e2e_script_function(world: u64) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("DemoScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("demo_e2e").to_owned(),
        vec![],
        vec![bcs::to_bytes(&world).unwrap()],
    ))
}

/// # Summary
/// Freezes the account at `address`. The sending account of this transaction
/// must be the Treasury Compliance account. The account being frozen cannot be
/// the Diem Root or Treasury Compliance account. After the successful
/// execution of this transaction no transactions may be sent from the frozen
/// account, and the frozen account may not send or receive coins.
///
/// # Technical Description
/// Sets the `AccountFreezing::FreezingBit` to `true` and emits a
/// `AccountFreezing::FreezeAccountEvent`. The transaction sender must be the
/// Treasury Compliance account, but the account at `to_freeze_account` must
/// not be either `0xA550C18` (the Diem Root address), or `0xB1E55ED` (the
/// Treasury Compliance address). Note that this is a per-account property
/// e.g., freezing a Parent VASP will not affect the status of any of its child
/// accounts and vice versa.
///
/// # Events
/// Successful execution of this transaction will emit a `AccountFreezing::FreezeAccountEvent` on
/// the `freeze_event_handle` held in the `AccountFreezing::FreezeEventsHolder` resource published
/// under `0xA550C18` with the `frozen_address` being the `to_freeze_account`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `to_freeze_account` | `address` | The account address to be frozen. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded.
|
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. |
/// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_TC` | `to_freeze_account` was the Treasury Compliance account (`0xB1E55ED`). |
/// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_DIEM_ROOT` | `to_freeze_account` was the Diem Root account (`0xA550C18`). |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::unfreeze_account`
pub fn encode_freeze_account_script_function(
    sliding_nonce: u64,
    to_freeze_account: AccountAddress,
) -> TransactionPayload {
    // `TreasuryComplianceScripts` is published at the core-framework address 0x1.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("TreasuryComplianceScripts").to_owned(),
    );
    let args = vec![
        bcs::to_bytes(&sliding_nonce).unwrap(),
        bcs::to_bytes(&to_freeze_account).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("freeze_account").to_owned(),
        vec![], // no type arguments
        args,
    ))
}

/// # Summary
/// Initializes the Diem consensus config that is stored on-chain. This
/// transaction can only be sent from the Diem Root account.
///
/// # Technical Description
/// Initializes the `DiemConsensusConfig` on-chain config to empty and allows future updates from DiemRoot via
/// `update_diem_consensus_config`. This doesn't emit a `DiemConfig::NewEpochEvent`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. | pub fn encode_initialize_diem_consensus_config_script_function( sliding_nonce: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("SystemAdministrationScripts").to_owned(), ), ident_str!("initialize_diem_consensus_config").to_owned(), vec![], vec![bcs::to_bytes(&sliding_nonce).unwrap()], )) } pub fn encode_join_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorScripts").to_owned(), ), ident_str!("join").to_owned(), vec![], vec![], )) } pub fn encode_leave_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorScripts").to_owned(), ), ident_str!("leave").to_owned(), vec![], vec![], )) } pub fn encode_minerstate_commit_script_function( challenge: Vec<u8>, solution: Vec<u8>, difficulty: u64, security: u64, ) -> TransactionPayload { 
TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TowerStateScripts").to_owned(), ), ident_str!("minerstate_commit").to_owned(), vec![], vec![ bcs::to_bytes(&challenge).unwrap(), bcs::to_bytes(&solution).unwrap(), bcs::to_bytes(&difficulty).unwrap(), bcs::to_bytes(&security).unwrap(), ], )) } pub fn encode_minerstate_commit_by_operator_script_function( owner_address: AccountAddress, challenge: Vec<u8>, solution: Vec<u8>, difficulty: u64, security: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TowerStateScripts").to_owned(), ), ident_str!("minerstate_commit_by_operator").to_owned(), vec![], vec![ bcs::to_bytes(&owner_address).unwrap(), bcs::to_bytes(&challenge).unwrap(), bcs::to_bytes(&solution).unwrap(), bcs::to_bytes(&difficulty).unwrap(), bcs::to_bytes(&security).unwrap(), ], )) } pub fn encode_minerstate_helper_script_function() -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TowerStateScripts").to_owned(), ), ident_str!("minerstate_helper").to_owned(), vec![], vec![], )) } /// A validator (Alice) can delegate the authority for the operation of an upgrade to another validator (Bob). When Oracle delegation happens, effectively the consensus voting power of Alice, is added to Bob only for the effect of calculating the preference on electing a stdlib binary. Whatever binary Bob proposes, Alice will also propose without needing to be submitting transactions. 
pub fn encode_ol_delegate_vote_script_function(dest: AccountAddress) -> TransactionPayload {
    // `OracleScripts` is published at the core-framework address 0x1.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("OracleScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("ol_delegate_vote").to_owned(),
        vec![],
        vec![bcs::to_bytes(&dest).unwrap()],
    ))
}

/// First Bob must have delegation enabled, which can be done with:
pub fn encode_ol_enable_delegation_script_function() -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("OracleScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("ol_enable_delegation").to_owned(),
        vec![],
        vec![],
    ))
}

/// Builds the payload invoking `OracleScripts::ol_oracle_tx` with an oracle `id` and payload `data`.
pub fn encode_ol_oracle_tx_script_function(id: u64, data: Vec<u8>) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("OracleScripts").to_owned(),
    );
    let args = vec![bcs::to_bytes(&id).unwrap(), bcs::to_bytes(&data).unwrap()];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("ol_oracle_tx").to_owned(),
        vec![],
        args,
    ))
}

/// Builds the payload invoking `ValidatorScripts::ol_reconfig_bulk_update_setup`
/// with the five test-fixture validator addresses.
pub fn encode_ol_reconfig_bulk_update_setup_script_function(
    alice: AccountAddress,
    bob: AccountAddress,
    carol: AccountAddress,
    sha: AccountAddress,
    ram: AccountAddress,
) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("ValidatorScripts").to_owned(),
    );
    let args = vec![
        bcs::to_bytes(&alice).unwrap(),
        bcs::to_bytes(&bob).unwrap(),
        bcs::to_bytes(&carol).unwrap(),
        bcs::to_bytes(&sha).unwrap(),
        bcs::to_bytes(&ram).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("ol_reconfig_bulk_update_setup").to_owned(),
        vec![],
        args,
    ))
}

/// Alice can remove Bob as the delegate with this function.
pub fn encode_ol_remove_delegation_script_function() -> TransactionPayload {
    // No arguments: the delegator is simply the transaction sender.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("OracleScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("ol_remove_delegation").to_owned(),
        vec![],
        vec![],
    ))
}

/// # Summary
/// Transfers a given number of coins in a specified currency from one account to another.
/// Transfers over a specified amount defined on-chain that are between two different VASPs, or
/// other accounts that have opted-in will be subject to on-chain checks to ensure the receiver has
/// agreed to receive the coins. This transaction can be sent by any account that can hold a
/// balance, and to any account that can hold a balance. Both accounts must hold balances in the
/// currency being transacted.
///
/// # Technical Description
///
/// Transfers `amount` coins of type `Currency` from `payer` to `payee` with (optional) associated
/// `metadata` and an (optional) `metadata_signature` on the message of the form
/// `metadata` | `Signer::address_of(payer)` | `amount` | `DualAttestation::DOMAIN_SEPARATOR`, that
/// has been signed by the `payee`'s private key associated with the `compliance_public_key` held in
/// the `payee`'s `DualAttestation::Credential`. Both the `Signer::address_of(payer)` and `amount` fields
/// in the `metadata_signature` must be BCS-encoded bytes, and `|` denotes concatenation.
/// The `metadata` and `metadata_signature` parameters are only required if `amount` >=
/// `DualAttestation::get_cur_microdiem_limit` XDX and `payer` and `payee` are distinct VASPs.
/// However, a transaction sender can opt in to dual attestation even when it is not required
/// (e.g., a DesignatedDealer -> VASP payment) by providing a non-empty `metadata_signature`.
/// Standardized `metadata` BCS format can be found in `diem_types::transaction::metadata::Metadata`.
/// /// # Events /// Successful execution of this script emits two events: /// * A `DiemAccount::SentPaymentEvent` on `payer`'s `DiemAccount::DiemAccount` `sent_events` handle; and /// * A `DiemAccount::ReceivedPaymentEvent` on `payee`'s `DiemAccount::DiemAccount` `received_events` handle. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` being sent in this transaction. `Currency` must be an already-registered currency on-chain. | /// | `payer` | `signer` | The signer of the sending account that coins are being transferred from. | /// | `payee` | `address` | The address of the account the coins are being transferred to. | /// | `metadata` | `vector<u8>` | Optional metadata about this payment. | /// | `metadata_signature` | `vector<u8>` | Optional signature over `metadata` and payment information. See | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `payer` doesn't hold a balance in `Currency`. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Currency`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO` | `amount` is zero. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYEE_DOES_NOT_EXIST` | No account exists at the `payee` address. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | An account exists at `payee`, but it does not accept payments in `Currency`. | /// | `Errors::INVALID_STATE` | `AccountFreezing::EACCOUNT_FROZEN` | The `payee` account is frozen. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EMALFORMED_METADATA_SIGNATURE` | `metadata_signature` is not 64 bytes. 
|
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_METADATA_SIGNATURE` | `metadata_signature` does not verify against the `payee`'s `DualAttestation::Credential` `compliance_public_key` public key. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EWITHDRAWAL_EXCEEDS_LIMITS` | `payer` has exceeded its daily withdrawal limits for the backing coins of XDX. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | `payee` has exceeded its daily deposit limits for XDX. |
///
/// # Related Scripts
/// * `AccountCreationScripts::create_child_vasp_account`
/// * `AccountCreationScripts::create_parent_vasp_account`
/// * `AccountAdministrationScripts::add_currency_to_account`
pub fn encode_peer_to_peer_with_metadata_script_function(
    currency: TypeTag,
    payee: AccountAddress,
    amount: u64,
    metadata: Vec<u8>,
    metadata_signature: Vec<u8>,
) -> TransactionPayload {
    // `PaymentScripts` is published at the core-framework address 0x1.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("PaymentScripts").to_owned(),
    );
    let args = vec![
        bcs::to_bytes(&payee).unwrap(),
        bcs::to_bytes(&amount).unwrap(),
        bcs::to_bytes(&metadata).unwrap(),
        bcs::to_bytes(&metadata_signature).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("peer_to_peer_with_metadata").to_owned(),
        vec![currency], // the `Currency` type argument
        args,
    ))
}

/// # Summary
/// Moves a specified number of coins in a given currency from the account's
/// balance to its preburn area after which the coins may be burned. This
/// transaction may be sent by any account that holds a balance and preburn area
/// in the specified currency.
///
/// # Technical Description
/// Moves the specified `amount` of coins in `Token` currency from the sending `account`'s
/// `DiemAccount::Balance<Token>` to the `Diem::Preburn<Token>` published under the same
/// `account`. `account` must have both of these resources published under it at the start of this
/// transaction in order for it to execute successfully.
///
/// # Events
/// Successful execution of this script emits two events:
/// * `DiemAccount::SentPaymentEvent ` on `account`'s `DiemAccount::DiemAccount` `sent_events`
/// handle with the `payee` and `payer` fields being `account`'s address; and
/// * A `Diem::PreburnEvent` with `Token`'s currency code on the
/// `Diem::CurrencyInfo<Token>`'s `preburn_events` handle for `Token` and with
/// `preburn_address` set to `account`'s address.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `Token` | Type | The Move type for the `Token` currency being moved to the preburn area. `Token` must be an already-registered currency on-chain. |
/// | `account` | `signer` | The signer of the sending account. |
/// | `amount` | `u64` | The amount in `Token` to be moved to the preburn area. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Token` is not a registered currency on-chain. |
/// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for `account` has already been extracted. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Token`. |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `account` doesn't hold a balance in `Token`. |
/// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | `account` doesn't have a `Diem::Preburn<Token>` resource published under it. |
/// | `Errors::INVALID_STATE` | `Diem::EPREBURN_OCCUPIED` | The `value` field in the `Diem::Preburn<Token>` resource under the sender is non-zero. |
/// | `Errors::NOT_PUBLISHED` | `Roles::EROLE_ID` | The `account` did not have a role assigned to it.
|
/// | `Errors::REQUIRES_ROLE` | `Roles::EDESIGNATED_DEALER` | The `account` did not have the role of DesignatedDealer. |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::cancel_burn_with_amount`
/// * `TreasuryComplianceScripts::burn_with_amount`
/// * `TreasuryComplianceScripts::burn_txn_fees`
pub fn encode_preburn_script_function(token: TypeTag, amount: u64) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("TreasuryComplianceScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("preburn").to_owned(),
        vec![token], // the `Token` currency type argument
        vec![bcs::to_bytes(&amount).unwrap()],
    ))
}

/// # Summary
/// Rotates the authentication key of the sending account to the newly-specified ed25519 public key and
/// publishes a new shared authentication key derived from that public key under the sender's account.
/// Any account can send this transaction.
///
/// # Technical Description
/// Rotates the authentication key of the sending account to the
/// [authentication key derived from `public_key`](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys)
/// and publishes a `SharedEd25519PublicKey::SharedEd25519PublicKey` resource
/// containing the 32-byte ed25519 `public_key` and the `DiemAccount::KeyRotationCapability` for
/// `account` under `account`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `signer` | The signer of the sending account of the transaction. |
/// | `public_key` | `vector<u8>` | A valid 32-byte Ed25519 public key for `account`'s authentication key to be rotated to and stored.
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability` resource. |
/// | `Errors::ALREADY_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | The `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is already published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. |
///
/// # Related Scripts
/// * `AccountAdministrationScripts::rotate_shared_ed25519_public_key`
pub fn encode_publish_shared_ed25519_public_key_script_function(
    public_key: Vec<u8>,
) -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("AccountAdministrationScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("publish_shared_ed25519_public_key").to_owned(),
        vec![],
        vec![bcs::to_bytes(&public_key).unwrap()],
    ))
}

/// # Summary
/// Updates a validator's configuration. This does not reconfigure the system and will not update
/// the configuration in the validator set that is seen by other validators in the network. Can
/// only be successfully sent by a Validator Operator account that is already registered with a
/// validator.
///
/// # Technical Description
/// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig`
/// config resource held under `validator_account`. It does not emit a `DiemConfig::NewEpochEvent`
/// so the copy of this config held in the validator set will not be updated, and the changes are
/// only "locally" under the `validator_account` account address.
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `signer` | Signer of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. 
|
///
/// # Related Scripts
/// * `AccountCreationScripts::create_validator_account`
/// * `AccountCreationScripts::create_validator_operator_account`
/// * `ValidatorAdministrationScripts::add_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::set_validator_operator`
/// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin`
/// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure`
pub fn encode_register_validator_config_script_function(
    validator_account: AccountAddress,
    consensus_pubkey: Vec<u8>,
    validator_network_addresses: Vec<u8>,
    fullnode_network_addresses: Vec<u8>,
) -> TransactionPayload {
    // `ValidatorAdministrationScripts` is published at the core-framework address 0x1.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("ValidatorAdministrationScripts").to_owned(),
    );
    let args = vec![
        bcs::to_bytes(&validator_account).unwrap(),
        bcs::to_bytes(&consensus_pubkey).unwrap(),
        bcs::to_bytes(&validator_network_addresses).unwrap(),
        bcs::to_bytes(&fullnode_network_addresses).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("register_validator_config").to_owned(),
        vec![], // no type arguments
        args,
    ))
}

/// # Summary
/// Remove a DiemID domain from parent VASP account. The transaction can only be sent by
/// the Treasury Compliance account.
///
/// # Technical Description
/// Removes a `DiemId::DiemIdDomain` from the `domains` field of the `DiemId::DiemIdDomains` resource published under
/// account with `address`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `address` | `address` | The `address` of parent VASP account that will update its domains. |
/// | `domain` | `vector<u8>` | The domain name.
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAIN_MANAGER` | The `DiemId::DiemIdDomainManager` resource is not yet published under the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `DiemId::EDIEM_ID_DOMAINS_NOT_PUBLISHED` | `address` does not have a `DiemId::DiemIdDomains` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EINVALID_DIEM_ID_DOMAIN` | The `domain` is greater in length than `DiemId::DOMAIN_LENGTH`. | /// | `Errors::INVALID_ARGUMENT` | `DiemId::EDOMAIN_NOT_FOUND` | The `domain` does not exist in the list of `DiemId::DiemIdDomain`s in the `DiemId::DiemIdDomains` resource published under `address`. | pub fn encode_remove_diem_id_domain_script_function( address: AccountAddress, domain: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("TreasuryComplianceScripts").to_owned(), ), ident_str!("remove_diem_id_domain").to_owned(), vec![], vec![ bcs::to_bytes(&address).unwrap(), bcs::to_bytes(&domain).unwrap(), ], )) } /// # Summary /// This script removes a validator account from the validator set, and triggers a reconfiguration /// of the system to remove the validator from the system. This transaction can only be /// successfully called by the Diem Root account. /// /// # Technical Description /// This script removes the account at `validator_address` from the validator set. This transaction /// emits a `DiemConfig::NewEpochEvent` event. 
Once the reconfiguration triggered by this event /// has been performed, the account at `validator_address` is no longer considered to be a /// validator in the network. This transaction will fail if the validator at `validator_address` /// is not in the validator set. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be removed from the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::ENOT_AN_ACTIVE_VALIDATOR` | The validator to be removed is not in the validator set. 
| /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. | /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` pub fn encode_remove_validator_and_reconfigure_script_function( sliding_nonce: u64, validator_name: Vec<u8>, validator_address: AccountAddress, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorAdministrationScripts").to_owned(), ), ident_str!("remove_validator_and_reconfigure").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&validator_name).unwrap(), bcs::to_bytes(&validator_address).unwrap(), ], )) } /// # Summary /// Rotates the `account`'s authentication key to the supplied new authentication key. May be sent by any account. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` /// field to `new_key`. 
`new_key` must be a valid authentication key that /// corresponds to an ed25519 public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` pub fn encode_rotate_authentication_key_script_function(new_key: Vec<u8>) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_authentication_key").to_owned(), vec![], vec![bcs::to_bytes(&new_key).unwrap()], )) } /// # Summary /// Rotates the sender's authentication key to the supplied new authentication key. May be sent by /// any account that has a sliding nonce resource published under it (usually this is Treasury /// Compliance or Diem Root accounts). 
/// /// # Technical Description /// Rotates the `account`'s `DiemAccount::DiemAccount` `authentication_key` /// field to `new_key`. `new_key` must be a valid authentication key that /// corresponds to an ed25519 public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. 
| /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` pub fn encode_rotate_authentication_key_with_nonce_script_function( sliding_nonce: u64, new_key: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_authentication_key_with_nonce").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&new_key).unwrap(), ], )) } /// # Summary /// Rotates the specified account's authentication key to the supplied new authentication key. May /// only be sent by the Diem Root account as a write set transaction. /// /// # Technical Description /// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. /// `new_key` must be a valid authentication key that corresponds to an ed25519 /// public key as described [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys), /// and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `signer` | The signer of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `signer` | Signer of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. | /// | `new_key` | `vector<u8>` | New authentication key to be used for `account`. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in` dr_account` has been previously recorded. | /// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_recovery_address` pub fn encode_rotate_authentication_key_with_nonce_admin_script_function( sliding_nonce: u64, new_key: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_authentication_key_with_nonce_admin").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&new_key).unwrap(), ], )) } /// # Summary /// Rotates the authentication key of a specified account that is part of a recovery address to a /// new authentication key. 
Only used for accounts that are part of a recovery address (see /// `AccountAdministrationScripts::add_recovery_rotation_capability` for account restrictions). /// /// # Technical Description /// Rotates the authentication key of the `to_recover` account to `new_key` using the /// `DiemAccount::KeyRotationCapability` stored in the `RecoveryAddress::RecoveryAddress` resource /// published under `recovery_address`. `new_key` must be a valide authentication key as described /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys). /// This transaction can be sent either by the `to_recover` account, or by the account where the /// `RecoveryAddress::RecoveryAddress` resource is published that contains `to_recover`'s `DiemAccount::KeyRotationCapability`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `recovery_address` | `address` | Address where `RecoveryAddress::RecoveryAddress` that holds `to_recover`'s `DiemAccount::KeyRotationCapability` is published. | /// | `to_recover` | `address` | The address of the account whose authentication key will be updated. | /// | `new_key` | `vector<u8>` | New authentication key to be used for the account at the `to_recover` address. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress::RecoveryAddress` resource published under it. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ECANNOT_ROTATE_KEY` | The address of `account` is not `recovery_address` or `to_recover`. 
| /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EACCOUNT_NOT_RECOVERABLE` | `to_recover`'s `DiemAccount::KeyRotationCapability` is not in the `RecoveryAddress::RecoveryAddress` resource published under `recovery_address`. | /// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | /// /// # Related Scripts /// * `AccountAdministrationScripts::rotate_authentication_key` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce` /// * `AccountAdministrationScripts::rotate_authentication_key_with_nonce_admin` pub fn encode_rotate_authentication_key_with_recovery_address_script_function( recovery_address: AccountAddress, to_recover: AccountAddress, new_key: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_authentication_key_with_recovery_address").to_owned(), vec![], vec![ bcs::to_bytes(&recovery_address).unwrap(), bcs::to_bytes(&to_recover).unwrap(), bcs::to_bytes(&new_key).unwrap(), ], )) } /// # Summary /// Updates the url used for off-chain communication, and the public key used to verify dual /// attestation on-chain. Transaction can be sent by any account that has dual attestation /// information published under it. In practice the only such accounts are Designated Dealers and /// Parent VASPs. /// /// # Technical Description /// Updates the `base_url` and `compliance_public_key` fields of the `DualAttestation::Credential` /// resource published under `account`. The `new_key` must be a valid ed25519 public key. 
/// /// # Events /// Successful execution of this transaction emits two events: /// * A `DualAttestation::ComplianceKeyRotationEvent` containing the new compliance public key, and /// the blockchain time at which the key was updated emitted on the `DualAttestation::Credential` /// `compliance_key_rotation_events` handle published under `account`; and /// * A `DualAttestation::BaseUrlRotationEvent` containing the new base url to be used for /// off-chain communication, and the blockchain time at which the url was updated emitted on the /// `DualAttestation::Credential` `base_url_rotation_events` handle published under `account`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account of the transaction. | /// | `new_url` | `vector<u8>` | ASCII-encoded url to be used for off-chain communication with `account`. | /// | `new_key` | `vector<u8>` | New ed25519 public key to be used for on-chain dual attestation checking. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `DualAttestation::ECREDENTIAL` | A `DualAttestation::Credential` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_PUBLIC_KEY` | `new_key` is not a valid ed25519 public key. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_parent_vasp_account` /// * `AccountCreationScripts::create_designated_dealer` /// * `AccountAdministrationScripts::rotate_dual_attestation_info` pub fn encode_rotate_dual_attestation_info_script_function( new_url: Vec<u8>, new_key: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_dual_attestation_info").to_owned(), vec![], vec![ bcs::to_bytes(&new_url).unwrap(), bcs::to_bytes(&new_key).unwrap(), ], )) } /// # Summary /// Rotates the authentication key in a `SharedEd25519PublicKey`. This transaction can be sent by /// any account that has previously published a shared ed25519 public key using /// `AccountAdministrationScripts::publish_shared_ed25519_public_key`. /// /// # Technical Description /// `public_key` must be a valid ed25519 public key. This transaction first rotates the public key stored in `account`'s /// `SharedEd25519PublicKey::SharedEd25519PublicKey` resource to `public_key`, after which it /// rotates the `account`'s authentication key to the new authentication key derived from `public_key` as defined /// [here](https://developers.diem.com/docs/core/accounts/#addresses-authentication-keys-and-cryptographic-keys) /// using the `DiemAccount::KeyRotationCapability` stored in `account`'s `SharedEd25519PublicKey::SharedEd25519PublicKey`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | A `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. | /// /// # Related Scripts /// * `AccountAdministrationScripts::publish_shared_ed25519_public_key` pub fn encode_rotate_shared_ed25519_public_key_script_function( public_key: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("AccountAdministrationScripts").to_owned(), ), ident_str!("rotate_shared_ed25519_public_key").to_owned(), vec![], vec![bcs::to_bytes(&public_key).unwrap()], )) } /// # Summary /// Updates the gas constants stored on chain and used by the VM for gas /// metering. This transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the on-chain config holding the `DiemVMConfig` and emits a /// `DiemConfig::NewEpochEvent` to trigger a reconfiguration of the system. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `global_memory_per_byte_cost` | `u64` | The new cost to read global memory per-byte to be used for gas metering. | /// | `global_memory_per_byte_write_cost` | `u64` | The new cost to write global memory per-byte to be used for gas metering. | /// | `min_transaction_gas_units` | `u64` | The new flat minimum amount of gas required for any transaction. 
| /// | `large_transaction_cutoff` | `u64` | The new size over which an additional charge will be assessed for each additional byte. | /// | `intrinsic_gas_per_byte` | `u64` | The new number of units of gas that to be charged per-byte over the new `large_transaction_cutoff`. | /// | `maximum_number_of_gas_units` | `u64` | The new maximum number of gas units that can be set in a transaction. | /// | `min_price_per_gas_unit` | `u64` | The new minimum gas price that can be set for a transaction. | /// | `max_price_per_gas_unit` | `u64` | The new maximum gas price that can be set for a transaction. | /// | `max_transaction_size_in_bytes` | `u64` | The new maximum size of a transaction that can be processed. | /// | `gas_unit_scaling_factor` | `u64` | The new scaling factor to use when scaling between external and internal gas units. | /// | `default_account_size` | `u64` | The new default account size to use when assessing final costs for reads and writes to global storage. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::INVALID_ARGUMENT` | `DiemVMConfig::EGAS_CONSTANT_INCONSISTENCY` | The provided gas constants are inconsistent. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. 
| pub fn encode_set_gas_constants_script_function( sliding_nonce: u64, global_memory_per_byte_cost: u64, global_memory_per_byte_write_cost: u64, min_transaction_gas_units: u64, large_transaction_cutoff: u64, intrinsic_gas_per_byte: u64, maximum_number_of_gas_units: u64, min_price_per_gas_unit: u64, max_price_per_gas_unit: u64, max_transaction_size_in_bytes: u64, gas_unit_scaling_factor: u64, default_account_size: u64, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("SystemAdministrationScripts").to_owned(), ), ident_str!("set_gas_constants").to_owned(), vec![], vec![ bcs::to_bytes(&sliding_nonce).unwrap(), bcs::to_bytes(&global_memory_per_byte_cost).unwrap(), bcs::to_bytes(&global_memory_per_byte_write_cost).unwrap(), bcs::to_bytes(&min_transaction_gas_units).unwrap(), bcs::to_bytes(&large_transaction_cutoff).unwrap(), bcs::to_bytes(&intrinsic_gas_per_byte).unwrap(), bcs::to_bytes(&maximum_number_of_gas_units).unwrap(), bcs::to_bytes(&min_price_per_gas_unit).unwrap(), bcs::to_bytes(&max_price_per_gas_unit).unwrap(), bcs::to_bytes(&max_transaction_size_in_bytes).unwrap(), bcs::to_bytes(&gas_unit_scaling_factor).unwrap(), bcs::to_bytes(&default_account_size).unwrap(), ], )) } /// # Summary /// Updates a validator's configuration, and triggers a reconfiguration of the system to update the /// validator set with this new validator configuration. Can only be successfully sent by a /// Validator Operator account that is already registered with a validator. /// /// # Technical Description /// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig` /// config resource held under `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to /// trigger a reconfiguration of the system. This reconfiguration will update the validator set /// on-chain with the updated `ValidatorConfig::ValidatorConfig`. 
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `signer` | Signer of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::register_validator_config` pub fn encode_set_validator_config_and_reconfigure_script_function( validator_account: AccountAddress, consensus_pubkey: Vec<u8>, validator_network_addresses: Vec<u8>, fullnode_network_addresses: Vec<u8>, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorAdministrationScripts").to_owned(), ), ident_str!("set_validator_config_and_reconfigure").to_owned(), vec![], vec![ bcs::to_bytes(&validator_account).unwrap(), bcs::to_bytes(&consensus_pubkey).unwrap(), bcs::to_bytes(&validator_network_addresses).unwrap(), bcs::to_bytes(&fullnode_network_addresses).unwrap(), ], )) } /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by an account with /// Validator role. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the sending validator account. The account at `operator_account` address must have /// a Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` /// resource published under it. 
The sending `account` must be a Validator and have a /// `ValidatorConfig::ValidatorConfig` resource published under it. This script does not emit a /// `DiemConfig::NewEpochEvent` and no reconfiguration of the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `signer` | The signer of the sending account of the transaction. | /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. 
| /// /// # Related Scripts /// * `AccountCreationScripts::create_validator_account` /// * `AccountCreationScripts::create_validator_operator_account` /// * `ValidatorAdministrationScripts::register_validator_config` /// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::add_validator_and_reconfigure` /// * `ValidatorAdministrationScripts::set_validator_operator_with_nonce_admin` /// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure` pub fn encode_set_validator_operator_script_function( operator_name: Vec<u8>, operator_account: AccountAddress, ) -> TransactionPayload { TransactionPayload::ScriptFunction(ScriptFunction::new( ModuleId::new( AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]), ident_str!("ValidatorAdministrationScripts").to_owned(), ), ident_str!("set_validator_operator").to_owned(), vec![], vec![ bcs::to_bytes(&operator_name).unwrap(), bcs::to_bytes(&operator_account).unwrap(), ], )) } /// # Summary /// Sets the validator operator for a validator in the validator's configuration resource "locally" /// and does not reconfigure the system. Changes from this transaction will not picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by the Diem Root /// account as a write set transaction. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the validator `account`. The account at `operator_account` address must have a /// Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource /// published under it. The account represented by the `account` signer must be a Validator and /// have a `ValidatorConfig::ValidatorConfig` resource published under it. No reconfiguration of /// the system is initiated by this script. 
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `dr_account` | `signer` | Signer of the sending account of the write set transaction. May only be the Diem Root signer. |
/// | `account` | `signer` | Signer of account specified in the `execute_as` field of the write set transaction. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. |
/// | `operator_name` | `vector<u8>` | Validator operator's human name. |
/// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in `dr_account` has been previously recorded. |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account |
/// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. |
/// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. |
/// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. |
/// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. |
///
/// # Related Scripts
/// * `AccountCreationScripts::create_validator_account`
/// * `AccountCreationScripts::create_validator_operator_account`
/// * `ValidatorAdministrationScripts::register_validator_config`
/// * `ValidatorAdministrationScripts::remove_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::add_validator_and_reconfigure`
/// * `ValidatorAdministrationScripts::set_validator_operator`
/// * `ValidatorAdministrationScripts::set_validator_config_and_reconfigure`
pub fn encode_set_validator_operator_with_nonce_admin_script_function(
    sliding_nonce: u64,
    operator_name: Vec<u8>,
    operator_account: AccountAddress,
) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("ValidatorAdministrationScripts").to_owned(),
        ),
        ident_str!("set_validator_operator_with_nonce_admin").to_owned(),
        vec![],
        vec![
            bcs::to_bytes(&sliding_nonce).unwrap(),
            bcs::to_bytes(&operator_name).unwrap(),
            bcs::to_bytes(&operator_account).unwrap(),
        ],
    ))
}

/// Builds a `TransactionPayload` invoking the `WalletScripts::set_wallet_type` script function
/// with no type arguments and the BCS-encoded `type_of` byte as its only argument.
pub fn encode_set_wallet_type_script_function(type_of: u8) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("WalletScripts").to_owned(),
        ),
        ident_str!("set_wallet_type").to_owned(),
        vec![],
        vec![bcs::to_bytes(&type_of).unwrap()],
    ))
}

/// # Summary
/// Mints a specified number of coins in a currency to a Designated Dealer.
The sending account
/// must be the Treasury Compliance account, and coins can only be minted to a Designated Dealer
/// account.
///
/// # Technical Description
/// Mints `mint_amount` of coins in the `CoinType` currency to Designated Dealer account at
/// `designated_dealer_address`. The `tier_index` parameter specifies which tier should be used to
/// verify the off-chain approval policy, and is based in part on the on-chain tier values
/// for the specific Designated Dealer, and the number of `CoinType` coins that have been minted to
/// the dealer over the past 24 hours. Every Designated Dealer has 4 tiers for each currency that
/// they support. The sending `tc_account` must be the Treasury Compliance account, and the
/// receiver an authorized Designated Dealer account.
///
/// # Events
/// Successful execution of the transaction will emit two events:
/// * A `Diem::MintEvent` with the amount and currency code minted is emitted on the
/// `mint_event_handle` in the stored `Diem::CurrencyInfo<CoinType>` resource stored under
/// `0xA550C18`; and
/// * A `DesignatedDealer::ReceivedMintEvent` with the amount, currency code, and Designated
/// Dealer's address is emitted on the `mint_event_handle` in the stored `DesignatedDealer::Dealer`
/// resource published under the `designated_dealer_address`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `CoinType` | Type | The Move type for the `CoinType` being minted. `CoinType` must be an already-registered currency on-chain. |
/// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `designated_dealer_address` | `address` | The address of the Designated Dealer account being minted to. |
/// | `mint_amount` | `u64` | The number of coins to be minted. |
/// | `tier_index` | `u64` | [Deprecated] The mint tier index to use for the Designated Dealer account. Will be ignored |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. |
/// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_MINT_AMOUNT` | `mint_amount` is zero. |
/// | `Errors::NOT_PUBLISHED` | `DesignatedDealer::EDEALER` | `DesignatedDealer::Dealer` or `DesignatedDealer::TierInfo<CoinType>` resource does not exist at `designated_dealer_address`. |
/// | `Errors::REQUIRES_CAPABILITY` | `Diem::EMINT_CAPABILITY` | `tc_account` does not have a `Diem::MintCapability<CoinType>` resource published under it. |
/// | `Errors::INVALID_STATE` | `Diem::EMINTING_NOT_ALLOWED` | Minting is not currently allowed for `CoinType` coins. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds would exceed the `account`'s account limits. |
///
/// # Related Scripts
/// * `AccountCreationScripts::create_designated_dealer`
/// * `PaymentScripts::peer_to_peer_with_metadata`
/// * `AccountAdministrationScripts::rotate_dual_attestation_info`
pub fn encode_tiered_mint_script_function(
    coin_type: TypeTag,
    sliding_nonce: u64,
    designated_dealer_address: AccountAddress,
    mint_amount: u64,
    tier_index: u64,
) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("TreasuryComplianceScripts").to_owned(),
        ),
        ident_str!("tiered_mint").to_owned(),
        vec![coin_type],
        vec![
            bcs::to_bytes(&sliding_nonce).unwrap(),
            bcs::to_bytes(&designated_dealer_address).unwrap(),
            bcs::to_bytes(&mint_amount).unwrap(),
            bcs::to_bytes(&tier_index).unwrap(),
        ],
    ))
}

/// # Summary
/// Unfreezes the account at `address`. The sending account of this transaction must be the
/// Treasury Compliance account. After the successful execution of this transaction transactions
/// may be sent from the previously frozen account, and coins may be sent and received.
///
/// # Technical Description
/// Sets the `AccountFreezing::FreezingBit` to `false` and emits a
/// `AccountFreezing::UnFreezeAccountEvent`. The transaction sender must be the Treasury Compliance
/// account. Note that this is a per-account property so unfreezing a Parent VASP will not affect
/// the status of any of its child accounts and vice versa.
///
/// # Events
/// Successful execution of this script will emit a `AccountFreezing::UnFreezeAccountEvent` with
/// the `unfrozen_address` set to the `to_unfreeze_account`'s address.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.
|
/// | `to_unfreeze_account` | `address` | The account address to be unfrozen. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::freeze_account`
pub fn encode_unfreeze_account_script_function(
    sliding_nonce: u64,
    to_unfreeze_account: AccountAddress,
) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("TreasuryComplianceScripts").to_owned(),
        ),
        ident_str!("unfreeze_account").to_owned(),
        vec![],
        vec![
            bcs::to_bytes(&sliding_nonce).unwrap(),
            bcs::to_bytes(&to_unfreeze_account).unwrap(),
        ],
    ))
}

/// # Summary
/// Updates the Diem consensus config that is stored on-chain and is used by the Consensus. This
/// transaction can only be sent from the Diem Root account.
///
/// # Technical Description
/// Updates the `DiemConsensusConfig` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger
/// a reconfiguration of the system.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `config` | `vector<u8>` | The serialized bytes of consensus config. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. |
pub fn encode_update_diem_consensus_config_script_function(
    sliding_nonce: u64,
    config: Vec<u8>,
) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("SystemAdministrationScripts").to_owned(),
        ),
        ident_str!("update_diem_consensus_config").to_owned(),
        vec![],
        vec![
            bcs::to_bytes(&sliding_nonce).unwrap(),
            bcs::to_bytes(&config).unwrap(),
        ],
    ))
}

/// # Summary
/// Updates the Diem major version that is stored on-chain and is used by the VM. This
/// transaction can only be sent from the Diem Root account.
///
/// # Technical Description
/// Updates the `DiemVersion` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger
/// a reconfiguration of the system. The `major` version that is passed in must be strictly greater
/// than the current major version held on-chain.
The VM reads this information and can use it to
/// preserve backwards compatibility with previous major versions of the VM.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `signer` | Signer of the sending account. Must be the Diem Root account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `major` | `u64` | The `major` version of the VM to be used from this transaction on. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. |
/// | `Errors::INVALID_ARGUMENT` | `DiemVersion::EINVALID_MAJOR_VERSION_NUMBER` | `major` is less-than or equal to the current major version stored on-chain. |
pub fn encode_update_diem_version_script_function(
    sliding_nonce: u64,
    major: u64,
) -> TransactionPayload {
    // Target: SystemAdministrationScripts::update_diem_version in the core framework account.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("SystemAdministrationScripts").to_owned(),
    );
    // Arguments are BCS-serialized in declaration order: sliding_nonce, then major.
    let args = vec![
        bcs::to_bytes(&sliding_nonce).unwrap(),
        bcs::to_bytes(&major).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("update_diem_version").to_owned(),
        vec![],
        args,
    ))
}

/// # Summary
/// Update the dual attestation limit on-chain. Defined in terms of micro-XDX.
The transaction can
/// only be sent by the Treasury Compliance account. After this transaction all inter-VASP
/// payments over this limit must be checked for dual attestation.
///
/// # Technical Description
/// Updates the `micro_xdx_limit` field of the `DualAttestation::Limit` resource published under
/// `0xA550C18`. The amount is set in micro-XDX.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `tc_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `new_micro_xdx_limit` | `u64` | The new dual attestation limit to be used on-chain. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::update_exchange_rate`
/// * `TreasuryComplianceScripts::update_minting_ability`
pub fn encode_update_dual_attestation_limit_script_function(
    sliding_nonce: u64,
    new_micro_xdx_limit: u64,
) -> TransactionPayload {
    // Target: TreasuryComplianceScripts::update_dual_attestation_limit in the core framework
    // account.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("TreasuryComplianceScripts").to_owned(),
    );
    // Arguments are BCS-serialized in declaration order.
    let args = vec![
        bcs::to_bytes(&sliding_nonce).unwrap(),
        bcs::to_bytes(&new_micro_xdx_limit).unwrap(),
    ];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("update_dual_attestation_limit").to_owned(),
        vec![],
        args,
    ))
}

/// # Summary
/// Update the rough on-chain exchange rate between a specified currency and XDX (as a conversion
/// to micro-XDX). The transaction can only be sent by the Treasury Compliance account. After this
/// transaction the updated exchange rate will be used for normalization of gas prices, and for
/// dual attestation checking.
///
/// # Technical Description
/// Updates the on-chain exchange rate from the given `Currency` to micro-XDX. The exchange rate
/// is given by `new_exchange_rate_numerator/new_exchange_rate_denominator`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `Currency` | Type | The Move type for the `Currency` whose exchange rate is being updated. `Currency` must be an already-registered currency on-chain. |
/// | `dm_account` | `signer` | The signer of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for the transaction. |
/// | `new_exchange_rate_numerator` | `u64` | The numerator for the new to micro-XDX exchange rate for `Currency`. |
/// | `new_exchange_rate_denominator` | `u64` | The denominator for the new to micro-XDX exchange rate for `Currency`.
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dm_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `dm_account` is not the Treasury Compliance account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `dm_account` is not the Treasury Compliance account. |
/// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::EDENOMINATOR` | `new_exchange_rate_denominator` is zero. |
/// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. |
/// | `Errors::LIMIT_EXCEEDED` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::update_dual_attestation_limit`
/// * `TreasuryComplianceScripts::update_minting_ability`
pub fn encode_update_exchange_rate_script_function(
    currency: TypeTag,
    sliding_nonce: u64,
    new_exchange_rate_numerator: u64,
    new_exchange_rate_denominator: u64,
) -> TransactionPayload {
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        ModuleId::new(
            AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
            ident_str!("TreasuryComplianceScripts").to_owned(),
        ),
        ident_str!("update_exchange_rate").to_owned(),
        vec![currency],
        vec![
            bcs::to_bytes(&sliding_nonce).unwrap(),
            bcs::to_bytes(&new_exchange_rate_numerator).unwrap(),
            bcs::to_bytes(&new_exchange_rate_denominator).unwrap(),
        ],
    ))
}

/// # Summary
/// Script to allow or disallow minting of new coins in a specified currency. This transaction can
/// only be sent by the Treasury Compliance account. Turning minting off for a currency will have
/// no effect on coins already in circulation, and coins may still be removed from the system.
///
/// # Technical Description
/// This transaction sets the `can_mint` field of the `Diem::CurrencyInfo<Currency>` resource
/// published under `0xA550C18` to the value of `allow_minting`. Minting of coins is allowed if
/// this field is set to `true` and minting of new coins in `Currency` is disallowed otherwise.
/// This transaction needs to be sent by the Treasury Compliance account.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `Currency` | Type | The Move type for the `Currency` whose minting ability is being updated. `Currency` must be an already-registered currency on-chain. |
/// | `account` | `signer` | Signer of the sending account. Must be the Treasury Compliance account. |
/// | `allow_minting` | `bool` | Whether to allow minting of new coins in `Currency`.
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. |
/// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | `Currency` is not a registered currency on-chain. |
///
/// # Related Scripts
/// * `TreasuryComplianceScripts::update_dual_attestation_limit`
/// * `TreasuryComplianceScripts::update_exchange_rate`
pub fn encode_update_minting_ability_script_function(
    currency: TypeTag,
    allow_minting: bool,
) -> TransactionPayload {
    // Target: TreasuryComplianceScripts::update_minting_ability in the core framework account.
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("TreasuryComplianceScripts").to_owned(),
    );
    // One type argument (the currency) and one BCS-encoded argument (the flag).
    let type_args = vec![currency];
    let args = vec![bcs::to_bytes(&allow_minting).unwrap()];
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("update_minting_ability").to_owned(),
        type_args,
        args,
    ))
}

/// Builds a `TransactionPayload` invoking the `ValidatorScripts::val_add_self` script function
/// with no type arguments and no arguments.
pub fn encode_val_add_self_script_function() -> TransactionPayload {
    let module = ModuleId::new(
        AccountAddress::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
        ident_str!("ValidatorScripts").to_owned(),
    );
    TransactionPayload::ScriptFunction(ScriptFunction::new(
        module,
        ident_str!("val_add_self").to_owned(),
        Vec::new(),
        Vec::new(),
    ))
}

/// # Summary
/// Adds a zero `Currency` balance to the sending `account`. This will enable `account` to
/// send, receive, and hold `Diem::Diem<Currency>` coins. This transaction can be
/// successfully sent by any account that is allowed to hold balances
/// (e.g., VASP, Designated Dealer).
///
/// # Technical Description
/// After the successful execution of this transaction the sending account will have a
/// `DiemAccount::Balance<Currency>` resource with zero balance published under it. Only
/// accounts that can hold balances can send this transaction, the sending account cannot
/// already have a `DiemAccount::Balance<Currency>` published under it.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `Currency` | Type | The Move type for the `Currency` being added to the sending account of the transaction. `Currency` must be an already-registered currency on-chain. |
/// | `account` | `&signer` | The signer of the sending account of the transaction. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Currency` is not a registered currency on-chain. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EROLE_CANT_STORE_BALANCE` | The sending `account`'s role does not permit balances. |
/// | `Errors::ALREADY_PUBLISHED` | `DiemAccount::EADD_EXISTING_CURRENCY` | A balance for `Currency` is already published under the sending `account`. |
///
/// # Related Scripts
/// * `Script::create_child_vasp_account`
/// * `Script::create_parent_vasp_account`
/// * `Script::peer_to_peer_with_metadata`
pub fn encode_add_currency_to_account_script(currency: TypeTag) -> Script {
    Script::new(
        ADD_CURRENCY_TO_ACCOUNT_CODE.to_vec(),
        vec![currency],
        vec![],
    )
}

/// # Summary
/// Stores the sending account's ability to rotate its authentication key with a designated recovery
/// account. Both the sending and recovery accounts need to belong to the same VASP and
/// both be VASP accounts. After this transaction both the sending account and the
/// specified recovery account can rotate the sender account's authentication key.
///
/// # Technical Description
/// Adds the `DiemAccount::KeyRotationCapability` for the sending account
/// (`to_recover_account`) to the `RecoveryAddress::RecoveryAddress` resource under
/// `recovery_address`.
After this transaction has been executed successfully the account at
/// `recovery_address` and the `to_recover_account` may rotate the authentication key of
/// `to_recover_account` (the sender of this transaction).
///
/// The sending account of this transaction (`to_recover_account`) must not have previously given away its unique key
/// rotation capability, and must be a VASP account. The account at `recovery_address`
/// must also be a VASP account belonging to the same VASP as the `to_recover_account`.
/// Additionally the account at `recovery_address` must have already initialized itself as
/// a recovery account address using the `Script::create_recovery_address` transaction script.
///
/// The sending account's (`to_recover_account`) key rotation capability is
/// removed in this transaction and stored in the `RecoveryAddress::RecoveryAddress`
/// resource stored under the account at `recovery_address`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `to_recover_account` | `&signer` | The signer reference of the sending account of this transaction. |
/// | `recovery_address` | `address` | The account address where the `to_recover_account`'s `DiemAccount::KeyRotationCapability` will be stored. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `to_recover_account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. |
/// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress` resource published under it. |
/// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EINVALID_KEY_ROTATION_DELEGATION` | `to_recover_account` and `recovery_address` do not belong to the same VASP. |
/// | `Errors::LIMIT_EXCEEDED` | `RecoveryAddress::EMAX_KEYS_REGISTERED` | `RecoveryAddress::MAX_REGISTERED_KEYS` have already been registered with this `recovery_address`. |
///
/// # Related Scripts
/// * `Script::create_recovery_address`
/// * `Script::rotate_authentication_key_with_recovery_address`
pub fn encode_add_recovery_rotation_capability_script(recovery_address: AccountAddress) -> Script {
    Script::new(
        ADD_RECOVERY_ROTATION_CAPABILITY_CODE.to_vec(),
        vec![],
        vec![TransactionArgument::Address(recovery_address)],
    )
}

/// # Summary
/// Adds a validator account to the validator set, and triggers a
/// reconfiguration of the system to admit the account to the validator set for the system. This
/// transaction can only be successfully called by the Diem Root account.
///
/// # Technical Description
/// This script adds the account at `validator_address` to the validator set.
/// This transaction emits a `DiemConfig::NewEpochEvent` event and triggers a
/// reconfiguration. Once the reconfiguration triggered by this script's
/// execution has been performed, the account at the `validator_address` is
/// considered to be a validator in the network.
///
/// This transaction script will fail if the `validator_address` address is already in the validator set
/// or does not have a `ValidatorConfig::ValidatorConfig` resource already published under it.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. |
/// | `validator_address` | `address` | The validator account address to be added to the validator set.
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. |
/// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EINVALID_PROSPECTIVE_VALIDATOR` | The validator to be added does not have a `ValidatorConfig::ValidatorConfig` resource published under it, or its `config` field is empty. |
/// | `Errors::INVALID_ARGUMENT` | `DiemSystem::EALREADY_A_VALIDATOR` | The `validator_address` account is already a registered validator. |
/// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. |
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::create_validator_operator_account`
/// * `Script::register_validator_config`
/// * `Script::remove_validator_and_reconfigure`
/// * `Script::set_validator_operator`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::set_validator_config_and_reconfigure`
pub fn encode_add_validator_and_reconfigure_script(
    sliding_nonce: u64,
    validator_name: Vec<u8>,
    validator_address: AccountAddress,
) -> Script {
    // Arguments are passed as typed `TransactionArgument`s in declaration order.
    let args = vec![
        TransactionArgument::U64(sliding_nonce),
        TransactionArgument::U8Vector(validator_name),
        TransactionArgument::Address(validator_address),
    ];
    // Legacy script-bytecode form: compiled code blob, no type arguments.
    Script::new(ADD_VALIDATOR_AND_RECONFIGURE_CODE.to_vec(), vec![], args)
}

/// # Summary
/// Burns all coins held in the preburn resource at the specified
/// preburn address and removes them from the system. The sending account must
/// be the Treasury Compliance account.
/// The account that holds the preburn resource will normally be a Designated
/// Dealer, but there are no enforced requirements that it be one.
///
/// # Technical Description
/// This transaction permanently destroys all the coins of `Token` type
/// stored in the `Diem::Preburn<Token>` resource published under the
/// `preburn_address` account address.
///
/// This transaction will only succeed if the sending `account` has a
/// `Diem::BurnCapability<Token>`, and a `Diem::Preburn<Token>` resource
/// exists under `preburn_address`, with a non-zero `to_burn` field. After the successful execution
/// of this transaction the `total_value` field in the
/// `Diem::CurrencyInfo<Token>` resource published under `0xA550C18` will be
/// decremented by the value of the `to_burn` field of the preburn resource
/// under `preburn_address` immediately before this transaction, and the
/// `to_burn` field of the preburn resource will have a zero value.
///
/// ## Events
/// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle
/// held in the `Diem::CurrencyInfo<Token>` resource's `burn_events` published under
/// `0xA550C18`.
///
/// # Parameters
/// | Name              | Type      | Description                                                                                                                  |
/// | ------            | ------    | -------------                                                                                                                |
/// | `Token`           | Type      | The Move type for the `Token` currency being burned. `Token` must be an already-registered currency on-chain.                |
/// | `tc_account`      | `&signer` | The signer reference of the sending account of this transaction, must have a burn capability for `Token` published under it. |
/// | `sliding_nonce`   | `u64`     | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.                                                   |
/// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held.                                                                 |
///
/// # Common Abort Conditions
/// | Error Category                | Error Reason                            | Description                                                                                           |
/// | ----------------              | --------------                          | -------------                                                                                         |
/// | `Errors::NOT_PUBLISHED`       | `SlidingNonce::ESLIDING_NONCE`          | A `SlidingNonce` resource is not published under `account`.                                           |
/// | `Errors::INVALID_ARGUMENT`    | `SlidingNonce::ENONCE_TOO_OLD`          | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not.            |
/// | `Errors::INVALID_ARGUMENT`    | `SlidingNonce::ENONCE_TOO_NEW`          | The `sliding_nonce` is too far in the future.                                                         |
/// | `Errors::INVALID_ARGUMENT`    | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded.                                                     |
/// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY`                | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it.               |
/// | `Errors::NOT_PUBLISHED`       | `Diem::EPREBURN`                        | The account at `preburn_address` does not have a `Diem::Preburn<Token>` resource published under it.  |
/// | `Errors::INVALID_STATE`       | `Diem::EPREBURN_EMPTY`                  | The `Diem::Preburn<Token>` resource is empty (has a value of 0).                                      |
/// | `Errors::NOT_PUBLISHED`       | `Diem::ECURRENCY_INFO`                  | The specified `Token` is not a registered currency on-chain.                                          |
///
/// # Related Scripts
/// * `Script::burn_txn_fees`
/// * `Script::cancel_burn`
/// * `Script::preburn`
pub fn encode_burn_script(
    token: TypeTag,
    sliding_nonce: u64,
    preburn_address: AccountAddress,
) -> Script {
    Script::new(
        BURN_CODE.to_vec(),
        vec![token],
        vec![
            TransactionArgument::U64(sliding_nonce),
            TransactionArgument::Address(preburn_address),
        ],
    )
}

/// # Summary
/// Burns the transaction fees collected in the `CoinType` currency so that the
/// Diem association may reclaim the backing coins off-chain. May only be sent
/// by the Treasury Compliance account.
///
/// # Technical Description
/// Burns the transaction fees collected in `CoinType` so that the
/// association may reclaim the backing coins. Once this transaction has executed
/// successfully, all transaction fees collected in `CoinType` since the last time
/// this script was called with that specific currency will have been burned.
/// Both `balance` and `preburn` fields in the
/// `TransactionFee::TransactionFee<CoinType>` resource published under the `0xB1E55ED`
/// account address will have a value of 0 after the successful execution of this script.
///
/// ## Events
/// The successful execution of this transaction will emit a `Diem::BurnEvent` on the event handle
/// held in the `Diem::CurrencyInfo<CoinType>` resource's `burn_events` published under
/// `0xA550C18`.
///
/// # Parameters
/// | Name         | Type      | Description                                                                                                                                         |
/// | ------       | ------    | -------------                                                                                                                                       |
/// | `CoinType`   | Type      | The Move type for the `CoinType` being added to the sending account of the transaction. `CoinType` must be an already-registered currency on-chain. |
/// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account.                                           |
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `TransactionFee::ETRANSACTION_FEE` | `CoinType` is not an accepted transaction fee currency. | /// | `Errors::INVALID_ARGUMENT` | `Diem::ECOIN` | The collected fees in `CoinType` are zero. | /// /// # Related Scripts /// * `Script::burn` /// * `Script::cancel_burn` pub fn encode_burn_txn_fees_script(coin_type: TypeTag) -> Script { Script::new(BURN_TXN_FEES_CODE.to_vec(), vec![coin_type], vec![]) } /// # Summary /// Cancels and returns all coins held in the preburn area under /// `preburn_address` and returns the funds to the `preburn_address`'s balance. /// Can only be successfully sent by an account with Treasury Compliance role. /// /// # Technical Description /// Cancels and returns all coins held in the `Diem::Preburn<Token>` resource under the `preburn_address` and /// return the funds to the `preburn_address` account's `DiemAccount::Balance<Token>`. /// The transaction must be sent by an `account` with a `Diem::BurnCapability<Token>` /// resource published under it. The account at `preburn_address` must have a /// `Diem::Preburn<Token>` resource published under it, and its value must be nonzero. The transaction removes /// the entire balance held in the `Diem::Preburn<Token>` resource, and returns it back to the account's /// `DiemAccount::Balance<Token>` under `preburn_address`. Due to this, the account at /// `preburn_address` must already have a balance in the `Token` currency published /// before this script is called otherwise the transaction will fail. 
///
/// ## Events
/// The successful execution of this transaction will emit:
/// * A `Diem::CancelBurnEvent` on the event handle held in the `Diem::CurrencyInfo<Token>`
/// resource's `burn_events` published under `0xA550C18`.
/// * A `DiemAccount::ReceivedPaymentEvent` on the `preburn_address`'s
/// `DiemAccount::DiemAccount` `received_events` event handle with both the `payer` and `payee`
/// being `preburn_address`.
///
/// # Parameters
/// | Name              | Type      | Description                                                                                                                           |
/// | ------            | ------    | -------------                                                                                                                         |
/// | `Token`           | Type      | The Move type for the `Token` currency that burning is being cancelled for. `Token` must be an already-registered currency on-chain.  |
/// | `account`         | `&signer` | The signer reference of the sending account of this transaction, must have a burn capability for `Token` published under it.          |
/// | `preburn_address` | `address` | The address where the coins to-be-burned are currently held.                                                                          |
///
/// # Common Abort Conditions
/// | Error Category                | Error Reason                                     | Description                                                                                           |
/// | ----------------              | --------------                                   | -------------                                                                                         |
/// | `Errors::REQUIRES_CAPABILITY` | `Diem::EBURN_CAPABILITY`                         | The sending `account` does not have a `Diem::BurnCapability<Token>` published under it.               |
/// | `Errors::NOT_PUBLISHED`       | `Diem::EPREBURN`                                 | The account at `preburn_address` does not have a `Diem::Preburn<Token>` resource published under it.  |
/// | `Errors::NOT_PUBLISHED`       | `Diem::ECURRENCY_INFO`                           | The specified `Token` is not a registered currency on-chain.                                          |
/// | `Errors::INVALID_ARGUMENT`    | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO`             | The value held in the preburn resource was zero.                                                      |
/// | `Errors::INVALID_ARGUMENT`    | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE`  | The account at `preburn_address` doesn't have a balance resource for `Token`.                         |
/// | `Errors::LIMIT_EXCEEDED`      | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS`           | The depositing of the funds held in the preburn area would exceed the `account`'s account limits.     |
/// | `Errors::INVALID_STATE`       | `DualAttestation::EPAYEE_COMPLIANCE_KEY_NOT_SET` | The `account` does not have a compliance key set on it but dual attestation checking was performed.   |
///
/// # Related Scripts
/// * `Script::burn_txn_fees`
/// * `Script::burn`
/// * `Script::preburn`
pub fn encode_cancel_burn_script(token: TypeTag, preburn_address: AccountAddress) -> Script {
    Script::new(
        CANCEL_BURN_CODE.to_vec(),
        vec![token],
        vec![TransactionArgument::Address(preburn_address)],
    )
}

/// # Summary
/// Creates a Child VASP account with its parent being the sending account of the transaction.
/// The sender of the transaction must be a Parent VASP account.
///
/// # Technical Description
/// Creates a `ChildVASP` account for the sender `parent_vasp` at `child_address` with a balance of
/// `child_initial_balance` in `CoinType` and an initial authentication key of
/// `auth_key_prefix | child_address`.
///
/// If `add_all_currencies` is true, the child address will have a zero balance in all available
/// currencies in the system.
///
/// The new account will be a child account of the transaction sender, which must be a
/// Parent VASP account. The child account will be recorded against the limit of
/// child accounts of the creating Parent VASP account.
///
/// ## Events
/// Successful execution with a `child_initial_balance` greater than zero will emit:
/// * A `DiemAccount::SentPaymentEvent` with the `payer` field being the Parent VASP's address,
/// and payee field being `child_address`. This is emitted on the Parent VASP's
/// `DiemAccount::DiemAccount` `sent_events` handle.
/// * A `DiemAccount::ReceivedPaymentEvent` with the `payer` field being the Parent VASP's address,
/// and payee field being `child_address`. This is emitted on the new Child VASPS's
/// `DiemAccount::DiemAccount` `received_events` handle.
///
/// # Parameters
/// | Name                    | Type         | Description                                                                                                                                 |
/// | ------                  | ------       | -------------                                                                                                                               |
/// | `CoinType`              | Type         | The Move type for the `CoinType` that the child account should be created with. `CoinType` must be an already-registered currency on-chain. |
/// | `parent_vasp`           | `&signer`    | The signer reference of the sending account. Must be a Parent VASP account.                                                                 |
/// | `child_address`         | `address`    | Address of the to-be-created Child VASP account.                                                                                            |
/// | `auth_key_prefix`       | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account.                                                    |
/// | `add_all_currencies`    | `bool`       | Whether to publish balance resources for all known currencies when the account is created.                                                  |
/// | `child_initial_balance` | `u64`        | The initial balance in `CoinType` to give the child account when it's created.                                                              |
///
/// # Common Abort Conditions
/// | Error Category              | Error Reason                                             | Description                                                                               |
/// | ----------------            | --------------                                           | -------------                                                                             |
/// | `Errors::INVALID_ARGUMENT`  | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY`             | The `auth_key_prefix` was not of length 32.                                               |
/// | `Errors::REQUIRES_ROLE`     | `Roles::EPARENT_VASP`                                    | The sending account wasn't a Parent VASP account.                                         |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID`                                        | The `child_address` address is already taken.                                             |
/// | `Errors::LIMIT_EXCEEDED`    | `VASP::ETOO_MANY_CHILDREN`                               | The sending account has reached the maximum number of allowed child accounts.             |
/// | `Errors::NOT_PUBLISHED`     | `Diem::ECURRENCY_INFO`                                   | The `CoinType` is not a registered currency on-chain.                                     |
/// | `Errors::INVALID_STATE`     | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED`  | The withdrawal capability for the sending account has already been extracted.             |
/// | `Errors::NOT_PUBLISHED`     | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY`               | The sending account doesn't have a balance in `CoinType`.                                 |
/// | `Errors::LIMIT_EXCEEDED`    | `DiemAccount::EINSUFFICIENT_BALANCE`                     | The sending account doesn't have at least `child_initial_balance` of `CoinType` balance.  |
/// | `Errors::INVALID_ARGUMENT`  | `DiemAccount::ECANNOT_CREATE_AT_VM_RESERVED`             | The `child_address` is the reserved address 0x0.                                          |
///
/// # Related Scripts
/// * `Script::create_parent_vasp_account`
/// * `Script::add_currency_to_account`
/// * `Script::rotate_authentication_key`
/// * `Script::add_recovery_rotation_capability`
/// * `Script::create_recovery_address`
pub fn encode_create_child_vasp_account_script(
    coin_type: TypeTag,
    child_address: AccountAddress,
    auth_key_prefix: Vec<u8>,
    add_all_currencies: bool,
    child_initial_balance: u64,
) -> Script {
    Script::new(
        CREATE_CHILD_VASP_ACCOUNT_CODE.to_vec(),
        vec![coin_type],
        vec![
            TransactionArgument::Address(child_address),
            TransactionArgument::U8Vector(auth_key_prefix),
            TransactionArgument::Bool(add_all_currencies),
            TransactionArgument::U64(child_initial_balance),
        ],
    )
}

/// # Summary
/// Creates a Designated Dealer account with the provided information, and initializes it with
/// default mint tiers. The transaction can only be sent by the Treasury Compliance account.
///
/// # Technical Description
/// Creates an account with the Designated Dealer role at `addr` with authentication key
/// `auth_key_prefix` | `addr` and a 0 balance of type `Currency`. If `add_all_currencies` is true,
/// 0 balances for all available currencies in the system will also be added. This can only be
/// invoked by an account with the TreasuryCompliance role.
///
/// At the time of creation the account is also initialized with default mint tiers of (500_000,
/// 5_000_000, 50_000_000, 500_000_000), and preburn areas for each currency that is added to the
/// account.
///
/// # Parameters
/// | Name                 | Type         | Description                                                                                                                                          |
/// | ------               | ------       | -------------                                                                                                                                        |
/// | `Currency`           | Type         | The Move type for the `Currency` that the Designated Dealer should be initialized with. `Currency` must be an already-registered currency on-chain.  |
/// | `tc_account`         | `&signer`    | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account.                                            |
/// | `sliding_nonce`      | `u64`        | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.                                                                           |
/// | `addr`               | `address`    | Address of the to-be-created Designated Dealer account.                                                                                              |
/// | `auth_key_prefix`    | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account.                                                             |
/// | `human_name`         | `vector<u8>` | ASCII-encoded human name for the Designated Dealer.                                                                                                  |
/// | `add_all_currencies` | `bool`       | Whether to publish preburn, balance, and tier info resources for all known (SCS) currencies or just `Currency` when the account is created.          |
///
/// # Common Abort Conditions
/// | Error Category              | Error Reason                            | Description                                                                                |
/// | ----------------            | --------------                          | -------------                                                                              |
/// | `Errors::NOT_PUBLISHED`     | `SlidingNonce::ESLIDING_NONCE`          | A `SlidingNonce` resource is not published under `tc_account`.                             |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_OLD`          | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_NEW`          | The `sliding_nonce` is too far in the future.                                              |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded.                                          |
/// | `Errors::REQUIRES_ADDRESS`  | `CoreAddresses::ETREASURY_COMPLIANCE`   | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::REQUIRES_ROLE`     | `Roles::ETREASURY_COMPLIANCE`           | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::NOT_PUBLISHED`     | `Diem::ECURRENCY_INFO`                  | The `Currency` is not a registered currency on-chain.                                      |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID`                       | The `addr` address is already taken.                                                       |
///
/// # Related Scripts
/// * `Script::tiered_mint`
/// * `Script::peer_to_peer_with_metadata`
/// * `Script::rotate_dual_attestation_info`
pub fn encode_create_designated_dealer_script(
    currency: TypeTag,
    sliding_nonce: u64,
    addr: AccountAddress,
    auth_key_prefix: Vec<u8>,
    human_name: Vec<u8>,
    add_all_currencies: bool,
) -> Script {
    Script::new(
        CREATE_DESIGNATED_DEALER_CODE.to_vec(),
        vec![currency],
        vec![
            TransactionArgument::U64(sliding_nonce),
            TransactionArgument::Address(addr),
            TransactionArgument::U8Vector(auth_key_prefix),
            TransactionArgument::U8Vector(human_name),
            TransactionArgument::Bool(add_all_currencies),
        ],
    )
}

/// # Summary
/// Creates a Parent VASP account with the specified human name. Must be called by the Treasury Compliance account.
///
/// # Technical Description
/// Creates an account with the Parent VASP role at `address` with authentication key
/// `auth_key_prefix` | `new_account_address` and a 0 balance of type `CoinType`. If
/// `add_all_currencies` is true, 0 balances for all available currencies in the system will
/// also be added. This can only be invoked by an TreasuryCompliance account.
/// `sliding_nonce` is a unique nonce for operation, see `SlidingNonce` for details.
///
/// # Parameters
/// | Name            | Type      | Description                                                                                                                                                    |
/// | ------          | ------    | -------------                                                                                                                                                  |
/// | `CoinType`      | Type      | The Move type for the `CoinType` currency that the Parent VASP account should be initialized with. `CoinType` must be an already-registered currency on-chain. |
/// | `tc_account`    | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account.                                                      |
/// | `sliding_nonce` | `u64`     | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.                                                                                     |
/// | `new_account_address` | `address`    | Address of the to-be-created Parent VASP account.                                          |
/// | `auth_key_prefix`     | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account.   |
/// | `human_name`          | `vector<u8>` | ASCII-encoded human name for the Parent VASP.                                              |
/// | `add_all_currencies`  | `bool`       | Whether to publish balance resources for all known currencies when the account is created. |
///
/// # Common Abort Conditions
/// | Error Category              | Error Reason                            | Description                                                                                |
/// | ----------------            | --------------                          | -------------                                                                              |
/// | `Errors::NOT_PUBLISHED`     | `SlidingNonce::ESLIDING_NONCE`          | A `SlidingNonce` resource is not published under `tc_account`.                             |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_OLD`          | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_NEW`          | The `sliding_nonce` is too far in the future.                                              |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded.                                          |
/// | `Errors::REQUIRES_ADDRESS`  | `CoreAddresses::ETREASURY_COMPLIANCE`   | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::REQUIRES_ROLE`     | `Roles::ETREASURY_COMPLIANCE`           | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::NOT_PUBLISHED`     | `Diem::ECURRENCY_INFO`                  | The `CoinType` is not a registered currency on-chain.                                      |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID`                       | The `new_account_address` address is already taken.                                        |
///
/// # Related Scripts
/// * `Script::create_child_vasp_account`
/// * `Script::add_currency_to_account`
/// * `Script::rotate_authentication_key`
/// * `Script::add_recovery_rotation_capability`
/// * `Script::create_recovery_address`
/// * `Script::rotate_dual_attestation_info`
pub fn encode_create_parent_vasp_account_script(
    coin_type: TypeTag,
    sliding_nonce: u64,
    new_account_address: AccountAddress,
    auth_key_prefix: Vec<u8>,
    human_name: Vec<u8>,
    add_all_currencies: bool,
) -> Script {
    Script::new(
        CREATE_PARENT_VASP_ACCOUNT_CODE.to_vec(),
        vec![coin_type],
        vec![
            TransactionArgument::U64(sliding_nonce),
            TransactionArgument::Address(new_account_address),
            TransactionArgument::U8Vector(auth_key_prefix),
            TransactionArgument::U8Vector(human_name),
            TransactionArgument::Bool(add_all_currencies),
        ],
    )
}

/// # Summary
/// Initializes the sending account as a recovery address that may be used by
/// the VASP that it belongs to. The sending account must be a VASP account.
/// Multiple recovery addresses can exist for a single VASP, but accounts in
/// each must be disjoint.
///
/// # Technical Description
/// Publishes a `RecoveryAddress::RecoveryAddress` resource under `account`. It then
/// extracts the `DiemAccount::KeyRotationCapability` for `account` and adds
/// it to the resource. After the successful execution of this transaction
/// other accounts may add their key rotation to this resource so that `account`
/// may be used as a recovery account for those accounts.
///
/// # Parameters
/// | Name      | Type      | Description                                           |
/// | ------    | ------    | -------------                                         |
/// | `account` | `&signer` | The signer of the sending account of the transaction. |
///
/// # Common Abort Conditions
/// | Error Category              | Error Reason                                               | Description                                                                                   |
/// | ----------------            | --------------                                             | -------------                                                                                 |
/// | `Errors::INVALID_STATE`     | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED`  | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`.           |
| /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ENOT_A_VASP` | `account` is not a VASP account. | /// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EKEY_ROTATION_DEPENDENCY_CYCLE` | A key rotation recovery cycle would be created by adding `account`'s key rotation capability. | /// | `Errors::ALREADY_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | A `RecoveryAddress::RecoveryAddress` resource has already been published under `account`. | /// /// # Related Scripts /// * `Script::add_recovery_rotation_capability` /// * `Script::rotate_authentication_key_with_recovery_address` pub fn encode_create_recovery_address_script() -> Script { Script::new(CREATE_RECOVERY_ADDRESS_CODE.to_vec(), vec![], vec![]) } /// # Summary /// Creates a Validator account. This transaction can only be sent by the Diem /// Root account. /// /// # Technical Description /// Creates an account with a Validator role at `new_account_address`, with authentication key /// `auth_key_prefix` | `new_account_address`. It publishes a /// `ValidatorConfig::ValidatorConfig` resource with empty `config`, and /// `operator_account` fields. The `human_name` field of the /// `ValidatorConfig::ValidatorConfig` is set to the passed in `human_name`. /// This script does not add the validator to the validator set or the system, /// but only creates the account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_account_address` | `address` | Address of the to-be-created Validator account. | /// | `auth_key_prefix` | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. | /// | `human_name` | `vector<u8>` | ASCII-encoded human name for the validator. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. | /// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID` | The `new_account_address` address is already taken. | /// /// # Related Scripts /// * `Script::add_validator_and_reconfigure` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_operator_with_nonce_admin` /// * `Script::set_validator_config_and_reconfigure` pub fn encode_create_validator_account_script( sliding_nonce: u64, new_account_address: AccountAddress, auth_key_prefix: Vec<u8>, human_name: Vec<u8>, ) -> Script { Script::new( CREATE_VALIDATOR_ACCOUNT_CODE.to_vec(), vec![], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::Address(new_account_address), TransactionArgument::U8Vector(auth_key_prefix), TransactionArgument::U8Vector(human_name), ], ) } /// # Summary /// Creates a Validator Operator account. This transaction can only be sent by the Diem /// Root account. 
///
/// # Technical Description
/// Creates an account with a Validator Operator role at `new_account_address`, with authentication key
/// `auth_key_prefix` | `new_account_address`. It publishes a
/// `ValidatorOperatorConfig::ValidatorOperatorConfig` resource with the specified `human_name`.
/// This script does not assign the validator operator to any validator accounts but only creates the account.
///
/// # Parameters
/// | Name                  | Type         | Description                                                                              |
/// | ------                | ------       | -------------                                                                            |
/// | `dr_account`          | `&signer`    | The signer reference of the sending account of this transaction. Must be the Diem Root signer. |
/// | `sliding_nonce`       | `u64`        | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.               |
/// | `new_account_address` | `address`    | Address of the to-be-created Validator account.                                          |
/// | `auth_key_prefix`     | `vector<u8>` | The authentication key prefix that will be used initially for the newly created account. |
/// | `human_name`          | `vector<u8>` | ASCII-encoded human name for the validator.                                              |
///
/// # Common Abort Conditions
/// | Error Category              | Error Reason                            | Description                                                                                |
/// | ----------------            | --------------                          | -------------                                                                              |
/// | `Errors::NOT_PUBLISHED`     | `SlidingNonce::ESLIDING_NONCE`          | A `SlidingNonce` resource is not published under `dr_account`.                             |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_OLD`          | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_TOO_NEW`          | The `sliding_nonce` is too far in the future.                                              |
/// | `Errors::INVALID_ARGUMENT`  | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded.                                          |
/// | `Errors::REQUIRES_ADDRESS`  | `CoreAddresses::EDIEM_ROOT`             | The sending account is not the Diem Root account.                                          |
/// | `Errors::REQUIRES_ROLE`     | `Roles::EDIEM_ROOT`                     | The sending account is not the Diem Root account.                                          |
/// | `Errors::ALREADY_PUBLISHED` | `Roles::EROLE_ID`                       | The `new_account_address` address is already taken.                                        |
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::add_validator_and_reconfigure`
/// * `Script::register_validator_config`
/// * `Script::remove_validator_and_reconfigure`
/// * `Script::set_validator_operator`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::set_validator_config_and_reconfigure`
pub fn encode_create_validator_operator_account_script(
    sliding_nonce: u64,
    new_account_address: AccountAddress,
    auth_key_prefix: Vec<u8>,
    human_name: Vec<u8>,
) -> Script {
    Script::new(
        CREATE_VALIDATOR_OPERATOR_ACCOUNT_CODE.to_vec(),
        vec![],
        vec![
            TransactionArgument::U64(sliding_nonce),
            TransactionArgument::Address(new_account_address),
            TransactionArgument::U8Vector(auth_key_prefix),
            TransactionArgument::U8Vector(human_name),
        ],
    )
}

/// # Summary
/// Freezes the account at `address`. The sending account of this transaction
/// must be the Treasury Compliance account. The account being frozen cannot be
/// the Diem Root or Treasury Compliance account. After the successful
/// execution of this transaction no transactions may be sent from the frozen
/// account, and the frozen account may not send or receive coins.
///
/// # Technical Description
/// Sets the `AccountFreezing::FreezingBit` to `true` and emits a
/// `AccountFreezing::FreezeAccountEvent`. The transaction sender must be the
/// Treasury Compliance account, but the account at `to_freeze_account` must
/// not be either `0xA550C18` (the Diem Root address), or `0xB1E55ED` (the
/// Treasury Compliance address). Note that this is a per-account property
/// e.g., freezing a Parent VASP will not affect the status of any of its child
/// accounts and vice versa.
///
/// ## Events
/// Successful execution of this transaction will emit a `AccountFreezing::FreezeAccountEvent` on
/// the `freeze_event_handle` held in the `AccountFreezing::FreezeEventsHolder` resource published
/// under `0xA550C18` with the `frozen_address` being the `to_freeze_account`.
///
/// # Parameters
/// | Name                | Type      | Description                                                                                               |
/// | ------              | ------    | -------------                                                                                             |
/// | `tc_account`        | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. |
/// | `sliding_nonce`     | `u64`     | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction.                                |
/// | `to_freeze_account` | `address` | The account address to be frozen.                                                                         |
///
/// # Common Abort Conditions
/// | Error Category             | Error Reason                                 | Description                                                                                |
/// | ----------------           | --------------                               | -------------                                                                              |
/// | `Errors::NOT_PUBLISHED`    | `SlidingNonce::ESLIDING_NONCE`               | A `SlidingNonce` resource is not published under `tc_account`.                             |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD`               | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW`               | The `sliding_nonce` is too far in the future.                                              |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED`      | The `sliding_nonce` has been previously recorded.                                          |
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE`        | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::REQUIRES_ROLE`    | `Roles::ETREASURY_COMPLIANCE`                | The sending account is not the Treasury Compliance account.                                |
/// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_TC`         | `to_freeze_account` was the Treasury Compliance account (`0xB1E55ED`).                     |
/// | `Errors::INVALID_ARGUMENT` | `AccountFreezing::ECANNOT_FREEZE_DIEM_ROOT`  | `to_freeze_account` was the Diem Root account (`0xA550C18`).                               |
///
/// # Related Scripts
/// * `Script::unfreeze_account`
pub fn encode_freeze_account_script(
    sliding_nonce: u64,
    to_freeze_account: AccountAddress,
) -> Script {
    Script::new(
        FREEZE_ACCOUNT_CODE.to_vec(),
        vec![],
        vec![
            TransactionArgument::U64(sliding_nonce),
            TransactionArgument::Address(to_freeze_account),
        ],
    )
}

/// # Summary
/// Transfers a given number of coins in a specified currency from one account to another.
/// Transfers over a specified amount defined on-chain that are between two different VASPs, or
/// other accounts that have opted-in will be subject to on-chain checks to ensure the receiver has
/// agreed to receive the coins. This transaction can be sent by any account that can hold a
/// balance, and to any account that can hold a balance. Both accounts must hold balances in the
/// currency being transacted.
///
/// # Technical Description
///
/// Transfers `amount` coins of type `Currency` from `payer` to `payee` with (optional) associated
/// `metadata` and an (optional) `metadata_signature` on the message
/// `metadata` | `Signer::address_of(payer)` | `amount` | `DualAttestation::DOMAIN_SEPARATOR`.
/// The `metadata` and `metadata_signature` parameters are only required if `amount` >=
/// `DualAttestation::get_cur_microdiem_limit` XDX and `payer` and `payee` are distinct VASPs.
/// However, a transaction sender can opt in to dual attestation even when it is not required
/// (e.g., a DesignatedDealer -> VASP payment) by providing a non-empty `metadata_signature`.
/// Standardized `metadata` BCS format can be found in `diem_types::transaction::metadata::Metadata`.
///
/// ## Events
/// Successful execution of this script emits two events:
/// * A `DiemAccount::SentPaymentEvent` on `payer`'s `DiemAccount::DiemAccount` `sent_events` handle; and
/// * A `DiemAccount::ReceivedPaymentEvent` on `payee`'s `DiemAccount::DiemAccount` `received_events` handle.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `Currency` | Type | The Move type for the `Currency` being sent in this transaction. `Currency` must be an already-registered currency on-chain. |
/// | `payer` | `&signer` | The signer reference of the sending account that coins are being transferred from. |
/// | `payee` | `address` | The address of the account the coins are being transferred to. |
/// | `metadata` | `vector<u8>` | Optional metadata about this payment. |
/// | `metadata_signature` | `vector<u8>` | Optional signature over `metadata` and payment information. See |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `payer` doesn't hold a balance in `Currency`. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Currency`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::ECOIN_DEPOSIT_IS_ZERO` | `amount` is zero. |
/// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYEE_DOES_NOT_EXIST` | No account exists at the `payee` address. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EPAYEE_CANT_ACCEPT_CURRENCY_TYPE` | An account exists at `payee`, but it does not accept payments in `Currency`. |
/// | `Errors::INVALID_STATE` | `AccountFreezing::EACCOUNT_FROZEN` | The `payee` account is frozen. |
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EMALFORMED_METADATA_SIGNATURE` | `metadata_signature` is not 64 bytes. |
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_METADATA_SIGNATURE` | `metadata_signature` does not verify against the `payee`'s `DualAttestation::Credential` `compliance_public_key` public key. 
|
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EWITHDRAWAL_EXCEEDS_LIMITS` | `payer` has exceeded its daily withdrawal limits for the backing coins of XDX. |
/// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | `payee` has exceeded its daily deposit limits for XDX. |
///
/// # Related Scripts
/// * `Script::create_child_vasp_account`
/// * `Script::create_parent_vasp_account`
/// * `Script::add_currency_to_account`
pub fn encode_peer_to_peer_with_metadata_script(
    currency: TypeTag,
    payee: AccountAddress,
    amount: u64,
    metadata: Vec<u8>,
    metadata_signature: Vec<u8>,
) -> Script {
    // The single type argument selects the currency being transferred; the
    // runtime arguments follow the script's parameter order.
    let ty_args = vec![currency];
    let args = vec![
        TransactionArgument::Address(payee),
        TransactionArgument::U64(amount),
        TransactionArgument::U8Vector(metadata),
        TransactionArgument::U8Vector(metadata_signature),
    ];
    Script::new(PEER_TO_PEER_WITH_METADATA_CODE.to_vec(), ty_args, args)
}

/// # Summary
/// Moves a specified number of coins in a given currency from the account's
/// balance to its preburn area after which the coins may be burned. This
/// transaction may be sent by any account that holds a balance and preburn area
/// in the specified currency.
///
/// # Technical Description
/// Moves the specified `amount` of coins in `Token` currency from the sending `account`'s
/// `DiemAccount::Balance<Token>` to the `Diem::Preburn<Token>` published under the same
/// `account`. `account` must have both of these resources published under it at the start of this
/// transaction in order for it to execute successfully.
///
/// ## Events
/// Successful execution of this script emits two events:
/// * `DiemAccount::SentPaymentEvent ` on `account`'s `DiemAccount::DiemAccount` `sent_events`
/// handle with the `payee` and `payer` fields being `account`'s address; and
/// * A `Diem::PreburnEvent` with `Token`'s currency code on the
/// `Diem::CurrencyInfo<Token>`'s `preburn_events` handle for `Token` and with
/// `preburn_address` set to `account`'s address.
/// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Token` | Type | The Move type for the `Token` currency being moved to the preburn area. `Token` must be an already-registered currency on-chain. | /// | `account` | `&signer` | The signer reference of the sending account. | /// | `amount` | `u64` | The amount in `Token` to be moved to the preburn area. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | The `Token` is not a registered currency on-chain. | /// | `Errors::INVALID_STATE` | `DiemAccount::EWITHDRAWAL_CAPABILITY_ALREADY_EXTRACTED` | The withdrawal capability for `account` has already been extracted. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EINSUFFICIENT_BALANCE` | `amount` is greater than `payer`'s balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `DiemAccount::EPAYER_DOESNT_HOLD_CURRENCY` | `account` doesn't hold a balance in `Token`. | /// | `Errors::NOT_PUBLISHED` | `Diem::EPREBURN` | `account` doesn't have a `Diem::Preburn<Token>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EPREBURN_OCCUPIED` | The `value` field in the `Diem::Preburn<Token>` resource under the sender is non-zero. | /// | `Errors::NOT_PUBLISHED` | `Roles::EROLE_ID` | The `account` did not have a role assigned to it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EDESIGNATED_DEALER` | The `account` did not have the role of DesignatedDealer. 
|
///
/// # Related Scripts
/// * `Script::cancel_burn`
/// * `Script::burn`
/// * `Script::burn_txn_fees`
pub fn encode_preburn_script(token: TypeTag, amount: u64) -> Script {
    // The single type argument selects the currency being preburned.
    let ty_args = vec![token];
    let args = vec![TransactionArgument::U64(amount)];
    Script::new(PREBURN_CODE.to_vec(), ty_args, args)
}

/// # Summary
/// Rotates the authentication key of the sending account to the
/// newly-specified public key and publishes a new shared authentication key
/// under the sender's account. Any account can send this transaction.
///
/// # Technical Description
/// Rotates the authentication key of the sending account to `public_key`,
/// and publishes a `SharedEd25519PublicKey::SharedEd25519PublicKey` resource
/// containing the 32-byte ed25519 `public_key` and the `DiemAccount::KeyRotationCapability` for
/// `account` under `account`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | The signer reference of the sending account of the transaction. |
/// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key for `account`'s authentication key to be rotated to and stored. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability` resource. |
/// | `Errors::ALREADY_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | The `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is already published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. 
|
///
/// # Related Scripts
/// * `Script::rotate_shared_ed25519_public_key`
pub fn encode_publish_shared_ed25519_public_key_script(public_key: Vec<u8>) -> Script {
    // No type arguments; the shared key bytes are the only runtime argument.
    let args = vec![TransactionArgument::U8Vector(public_key)];
    Script::new(
        PUBLISH_SHARED_ED25519_PUBLIC_KEY_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Updates a validator's configuration. This does not reconfigure the system and will not update
/// the configuration in the validator set that is seen by other validators in the network. Can
/// only be successfully sent by a Validator Operator account that is already registered with a
/// validator.
///
/// # Technical Description
/// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig`
/// config resource held under `validator_account`. It does not emit a `DiemConfig::NewEpochEvent`
/// so the copy of this config held in the validator set will not be updated, and the changes are
/// only "locally" under the `validator_account` account address.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. |
/// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. |
/// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. |
/// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. |
/// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. 
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. |
/// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. |
/// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. |
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::create_validator_operator_account`
/// * `Script::add_validator_and_reconfigure`
/// * `Script::remove_validator_and_reconfigure`
/// * `Script::set_validator_operator`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::set_validator_config_and_reconfigure`
pub fn encode_register_validator_config_script(
    validator_account: AccountAddress,
    consensus_pubkey: Vec<u8>,
    validator_network_addresses: Vec<u8>,
    fullnode_network_addresses: Vec<u8>,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::Address(validator_account),
        TransactionArgument::U8Vector(consensus_pubkey),
        TransactionArgument::U8Vector(validator_network_addresses),
        TransactionArgument::U8Vector(fullnode_network_addresses),
    ];
    Script::new(REGISTER_VALIDATOR_CONFIG_CODE.to_vec(), Vec::new(), args)
}

/// # Summary
/// This script removes a validator account from the validator set, and triggers a reconfiguration
/// of the system to remove the validator from the system. This transaction can only be
/// successfully called by the Diem Root account.
///
/// # Technical Description
/// This script removes the account at `validator_address` from the validator set. This transaction
/// emits a `DiemConfig::NewEpochEvent` event. 
Once the reconfiguration triggered by this event /// has been performed, the account at `validator_address` is no longer considered to be a /// validator in the network. This transaction will fail if the validator at `validator_address` /// is not in the validator set. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Diem Root signer. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `validator_name` | `vector<u8>` | ASCII-encoded human name for the validator. Must match the human name in the `ValidatorConfig::ValidatorConfig` for the validator. | /// | `validator_address` | `address` | The validator account address to be removed from the validator set. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | 0 | 0 | The provided `validator_name` does not match the already-recorded human name for the validator. | /// | `Errors::INVALID_ARGUMENT` | `DiemSystem::ENOT_AN_ACTIVE_VALIDATOR` | The validator to be removed is not in the validator set. 
|
/// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EDIEM_ROOT` | The sending account is not the Diem Root account. |
/// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. |
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::create_validator_operator_account`
/// * `Script::register_validator_config`
/// * `Script::add_validator_and_reconfigure`
/// * `Script::set_validator_operator`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::set_validator_config_and_reconfigure`
pub fn encode_remove_validator_and_reconfigure_script(
    sliding_nonce: u64,
    validator_name: Vec<u8>,
    validator_address: AccountAddress,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::U64(sliding_nonce),
        TransactionArgument::U8Vector(validator_name),
        TransactionArgument::Address(validator_address),
    ];
    Script::new(REMOVE_VALIDATOR_AND_RECONFIGURE_CODE.to_vec(), Vec::new(), args)
}

/// # Summary
/// Rotates the transaction sender's authentication key to the supplied new authentication key. May
/// be sent by any account.
///
/// # Technical Description
/// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`.
/// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated
/// its `DiemAccount::KeyRotationCapability`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | Signer reference of the sending account of the transaction. |
/// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. 
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. |
///
/// # Related Scripts
/// * `Script::rotate_authentication_key_with_nonce`
/// * `Script::rotate_authentication_key_with_nonce_admin`
/// * `Script::rotate_authentication_key_with_recovery_address`
pub fn encode_rotate_authentication_key_script(new_key: Vec<u8>) -> Script {
    // No type arguments; the new key bytes are the only runtime argument.
    let args = vec![TransactionArgument::U8Vector(new_key)];
    Script::new(ROTATE_AUTHENTICATION_KEY_CODE.to_vec(), Vec::new(), args)
}

/// # Summary
/// Rotates the sender's authentication key to the supplied new authentication key. May be sent by
/// any account that has a sliding nonce resource published under it (usually this is Treasury
/// Compliance or Diem Root accounts).
///
/// # Technical Description
/// Rotates the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`.
/// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated
/// its `DiemAccount::KeyRotationCapability`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | Signer reference of the sending account of the transaction. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. |
/// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. 
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. |
///
/// # Related Scripts
/// * `Script::rotate_authentication_key`
/// * `Script::rotate_authentication_key_with_nonce_admin`
/// * `Script::rotate_authentication_key_with_recovery_address`
pub fn encode_rotate_authentication_key_with_nonce_script(
    sliding_nonce: u64,
    new_key: Vec<u8>,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::U64(sliding_nonce),
        TransactionArgument::U8Vector(new_key),
    ];
    Script::new(
        ROTATE_AUTHENTICATION_KEY_WITH_NONCE_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Rotates the specified account's authentication key to the supplied new authentication key. May
/// only be sent by the Diem Root account as a write set transaction.
///
/// # Technical Description
/// Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`.
/// `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated
/// its `DiemAccount::KeyRotationCapability`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `dr_account` | `&signer` | The signer reference of the sending account of the write set transaction. May only be the Diem Root signer. |
/// | `account` | `&signer` | Signer reference of account specified in the `execute_as` field of the write set transaction. |
/// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. |
/// | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. |
/// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in `dr_account` has been previously recorded. |
/// | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. 
|
///
/// # Related Scripts
/// * `Script::rotate_authentication_key`
/// * `Script::rotate_authentication_key_with_nonce`
/// * `Script::rotate_authentication_key_with_recovery_address`
pub fn encode_rotate_authentication_key_with_nonce_admin_script(
    sliding_nonce: u64,
    new_key: Vec<u8>,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::U64(sliding_nonce),
        TransactionArgument::U8Vector(new_key),
    ];
    Script::new(
        ROTATE_AUTHENTICATION_KEY_WITH_NONCE_ADMIN_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Rotates the authentication key of a specified account that is part of a recovery address to a
/// new authentication key. Only used for accounts that are part of a recovery address (see
/// `Script::add_recovery_rotation_capability` for account restrictions).
///
/// # Technical Description
/// Rotates the authentication key of the `to_recover` account to `new_key` using the
/// `DiemAccount::KeyRotationCapability` stored in the `RecoveryAddress::RecoveryAddress` resource
/// published under `recovery_address`. This transaction can be sent either by the `to_recover`
/// account, or by the account where the `RecoveryAddress::RecoveryAddress` resource is published
/// that contains `to_recover`'s `DiemAccount::KeyRotationCapability`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | Signer reference of the sending account of the transaction. |
/// | `recovery_address` | `address` | Address where `RecoveryAddress::RecoveryAddress` that holds `to_recover`'s `DiemAccount::KeyRotationCapability` is published. |
/// | `to_recover` | `address` | The address of the account whose authentication key will be updated. |
/// | `new_key` | `vector<u8>` | New ed25519 public key to be used for the account at the `to_recover` address. 
|
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `RecoveryAddress::ERECOVERY_ADDRESS` | `recovery_address` does not have a `RecoveryAddress::RecoveryAddress` resource published under it. |
/// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::ECANNOT_ROTATE_KEY` | The address of `account` is not `recovery_address` or `to_recover`. |
/// | `Errors::INVALID_ARGUMENT` | `RecoveryAddress::EACCOUNT_NOT_RECOVERABLE` | `to_recover`'s `DiemAccount::KeyRotationCapability` is not in the `RecoveryAddress::RecoveryAddress` resource published under `recovery_address`. |
/// | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. |
///
/// # Related Scripts
/// * `Script::rotate_authentication_key`
/// * `Script::rotate_authentication_key_with_nonce`
/// * `Script::rotate_authentication_key_with_nonce_admin`
pub fn encode_rotate_authentication_key_with_recovery_address_script(
    recovery_address: AccountAddress,
    to_recover: AccountAddress,
    new_key: Vec<u8>,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::Address(recovery_address),
        TransactionArgument::Address(to_recover),
        TransactionArgument::U8Vector(new_key),
    ];
    Script::new(
        ROTATE_AUTHENTICATION_KEY_WITH_RECOVERY_ADDRESS_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Updates the url used for off-chain communication, and the public key used to verify dual
/// attestation on-chain. Transaction can be sent by any account that has dual attestation
/// information published under it. In practice the only such accounts are Designated Dealers and
/// Parent VASPs.
///
/// # Technical Description
/// Updates the `base_url` and `compliance_public_key` fields of the `DualAttestation::Credential`
/// resource published under `account`. The `new_key` must be a valid ed25519 public key.
///
/// ## Events
/// Successful execution of this transaction emits two events:
/// * A `DualAttestation::ComplianceKeyRotationEvent` containing the new compliance public key, and
/// the blockchain time at which the key was updated emitted on the `DualAttestation::Credential`
/// `compliance_key_rotation_events` handle published under `account`; and
/// * A `DualAttestation::BaseUrlRotationEvent` containing the new base url to be used for
/// off-chain communication, and the blockchain time at which the url was updated emitted on the
/// `DualAttestation::Credential` `base_url_rotation_events` handle published under `account`.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | Signer reference of the sending account of the transaction. |
/// | `new_url` | `vector<u8>` | ASCII-encoded url to be used for off-chain communication with `account`. |
/// | `new_key` | `vector<u8>` | New ed25519 public key to be used for on-chain dual attestation checking. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `DualAttestation::ECREDENTIAL` | A `DualAttestation::Credential` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `DualAttestation::EINVALID_PUBLIC_KEY` | `new_key` is not a valid ed25519 public key. |
///
/// # Related Scripts
/// * `Script::create_parent_vasp_account`
/// * `Script::create_designated_dealer`
/// * `Script::rotate_dual_attestation_info`
pub fn encode_rotate_dual_attestation_info_script(new_url: Vec<u8>, new_key: Vec<u8>) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::U8Vector(new_url),
        TransactionArgument::U8Vector(new_key),
    ];
    Script::new(ROTATE_DUAL_ATTESTATION_INFO_CODE.to_vec(), Vec::new(), args)
}

/// # Summary
/// Rotates the authentication key in a `SharedEd25519PublicKey`. 
This transaction can be sent by
/// any account that has previously published a shared ed25519 public key using
/// `Script::publish_shared_ed25519_public_key`.
///
/// # Technical Description
/// This first rotates the public key stored in `account`'s
/// `SharedEd25519PublicKey::SharedEd25519PublicKey` resource to `public_key`, after which it
/// rotates the authentication key using the capability stored in `account`'s
/// `SharedEd25519PublicKey::SharedEd25519PublicKey` to a new value derived from `public_key`
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | The signer reference of the sending account of the transaction. |
/// | `public_key` | `vector<u8>` | 32-byte Ed25519 public key. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `SharedEd25519PublicKey::ESHARED_KEY` | A `SharedEd25519PublicKey::SharedEd25519PublicKey` resource is not published under `account`. |
/// | `Errors::INVALID_ARGUMENT` | `SharedEd25519PublicKey::EMALFORMED_PUBLIC_KEY` | `public_key` is an invalid ed25519 public key. |
///
/// # Related Scripts
/// * `Script::publish_shared_ed25519_public_key`
pub fn encode_rotate_shared_ed25519_public_key_script(public_key: Vec<u8>) -> Script {
    // No type arguments; the new public key bytes are the only runtime argument.
    let args = vec![TransactionArgument::U8Vector(public_key)];
    Script::new(
        ROTATE_SHARED_ED25519_PUBLIC_KEY_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Updates a validator's configuration, and triggers a reconfiguration of the system to update the
/// validator set with this new validator configuration. Can only be successfully sent by a
/// Validator Operator account that is already registered with a validator.
///
/// # Technical Description
/// This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig`
/// config resource held under `validator_account`. 
It then emits a `DiemConfig::NewEpochEvent` to /// trigger a reconfiguration of the system. This reconfiguration will update the validator set /// on-chain with the updated `ValidatorConfig::ValidatorConfig`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. | /// | `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. | /// | `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// | `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. | /// | `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. 
|
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::create_validator_operator_account`
/// * `Script::add_validator_and_reconfigure`
/// * `Script::remove_validator_and_reconfigure`
/// * `Script::set_validator_operator`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::register_validator_config`
pub fn encode_set_validator_config_and_reconfigure_script(
    validator_account: AccountAddress,
    consensus_pubkey: Vec<u8>,
    validator_network_addresses: Vec<u8>,
    fullnode_network_addresses: Vec<u8>,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::Address(validator_account),
        TransactionArgument::U8Vector(consensus_pubkey),
        TransactionArgument::U8Vector(validator_network_addresses),
        TransactionArgument::U8Vector(fullnode_network_addresses),
    ];
    Script::new(
        SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE.to_vec(),
        Vec::new(),
        args,
    )
}

/// # Summary
/// Sets the validator operator for a validator in the validator's configuration resource "locally"
/// and does not reconfigure the system. Changes from this transaction will not be picked up by the
/// system until a reconfiguration of the system is triggered. May only be sent by an account with
/// Validator role.
///
/// # Technical Description
/// Sets the account at `operator_account` address and with the specified `human_name` as an
/// operator for the sending validator account. The account at `operator_account` address must have
/// a Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig`
/// resource published under it. The sending `account` must be a Validator and have a
/// `ValidatorConfig::ValidatorConfig` resource published under it. This script does not emit a
/// `DiemConfig::NewEpochEvent` and no reconfiguration of the system is initiated by this script.
///
/// # Parameters
/// | Name | Type | Description |
/// | ------ | ------ | ------------- |
/// | `account` | `&signer` | The signer reference of the sending account of the transaction. 
|
/// | `operator_name` | `vector<u8>` | Validator operator's human name. |
/// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. |
///
/// # Common Abort Conditions
/// | Error Category | Error Reason | Description |
/// | ---------------- | -------------- | ------------- |
/// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. |
/// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. |
/// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. |
/// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. |
/// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. |
///
/// # Related Scripts
/// * `Script::create_validator_account`
/// * `Script::create_validator_operator_account`
/// * `Script::register_validator_config`
/// * `Script::remove_validator_and_reconfigure`
/// * `Script::add_validator_and_reconfigure`
/// * `Script::set_validator_operator_with_nonce_admin`
/// * `Script::set_validator_config_and_reconfigure`
pub fn encode_set_validator_operator_script(
    operator_name: Vec<u8>,
    operator_account: AccountAddress,
) -> Script {
    // No type arguments; runtime arguments follow the script's parameter order.
    let args = vec![
        TransactionArgument::U8Vector(operator_name),
        TransactionArgument::Address(operator_account),
    ];
    Script::new(SET_VALIDATOR_OPERATOR_CODE.to_vec(), Vec::new(), args)
}

/// # Summary
/// Sets the validator operator for a validator in the validator's configuration resource "locally"
/// and does not reconfigure the system. 
Changes from this transaction will not be picked up by the /// system until a reconfiguration of the system is triggered. May only be sent by the Diem Root /// account as a write set transaction. /// /// # Technical Description /// Sets the account at `operator_account` address and with the specified `human_name` as an /// operator for the validator `account`. The account at `operator_account` address must have a /// Validator Operator role and have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource /// published under it. The account represented by the `account` signer must be a Validator and /// have a `ValidatorConfig::ValidatorConfig` resource published under it. No reconfiguration of /// the system is initiated by this script. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `dr_account` | `&signer` | The signer reference of the sending account of the write set transaction. May only be the Diem Root signer. | /// | `account` | `&signer` | Signer reference of account specified in the `execute_as` field of the write set transaction. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction for Diem Root. | /// | `operator_name` | `vector<u8>` | Validator operator's human name. | /// | `operator_account` | `address` | Address of the validator operator account to be added as the `account` validator's operator. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `dr_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` in `dr_account` is too old and it's impossible to determine if it's duplicated or not. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` in `dr_account` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` in` dr_account` has been previously recorded. | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | The sending account is not the Diem Root account or Treasury Compliance account | /// | `Errors::NOT_PUBLISHED` | `ValidatorOperatorConfig::EVALIDATOR_OPERATOR_CONFIG` | The `ValidatorOperatorConfig::ValidatorOperatorConfig` resource is not published under `operator_account`. | /// | 0 | 0 | The `human_name` field of the `ValidatorOperatorConfig::ValidatorOperatorConfig` resource under `operator_account` does not match the provided `human_name`. | /// | `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR` | `account` does not have a Validator account role. | /// | `Errors::INVALID_ARGUMENT` | `ValidatorConfig::ENOT_A_VALIDATOR_OPERATOR` | The account at `operator_account` does not have a `ValidatorOperatorConfig::ValidatorOperatorConfig` resource. | /// | `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | A `ValidatorConfig::ValidatorConfig` is not published under `account`. 
| /// /// # Related Scripts /// * `Script::create_validator_account` /// * `Script::create_validator_operator_account` /// * `Script::register_validator_config` /// * `Script::remove_validator_and_reconfigure` /// * `Script::add_validator_and_reconfigure` /// * `Script::set_validator_operator` /// * `Script::set_validator_config_and_reconfigure` pub fn encode_set_validator_operator_with_nonce_admin_script( sliding_nonce: u64, operator_name: Vec<u8>, operator_account: AccountAddress, ) -> Script { Script::new( SET_VALIDATOR_OPERATOR_WITH_NONCE_ADMIN_CODE.to_vec(), vec![], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::U8Vector(operator_name), TransactionArgument::Address(operator_account), ], ) } /// # Summary /// Mints a specified number of coins in a currency to a Designated Dealer. The sending account /// must be the Treasury Compliance account, and coins can only be minted to a Designated Dealer /// account. /// /// # Technical Description /// Mints `mint_amount` of coins in the `CoinType` currency to Designated Dealer account at /// `designated_dealer_address`. The `tier_index` parameter specifies which tier should be used to /// check verify the off-chain approval policy, and is based in part on the on-chain tier values /// for the specific Designated Dealer, and the number of `CoinType` coins that have been minted to /// the dealer over the past 24 hours. Every Designated Dealer has 4 tiers for each currency that /// they support. The sending `tc_account` must be the Treasury Compliance account, and the /// receiver an authorized Designated Dealer account. 
/// /// ## Events /// Successful execution of the transaction will emit two events: /// * A `Diem::MintEvent` with the amount and currency code minted is emitted on the /// `mint_event_handle` in the stored `Diem::CurrencyInfo<CoinType>` resource stored under /// `0xA550C18`; and /// * A `DesignatedDealer::ReceivedMintEvent` with the amount, currency code, and Designated /// Dealer's address is emitted on the `mint_event_handle` in the stored `DesignatedDealer::Dealer` /// resource published under the `designated_dealer_address`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `CoinType` | Type | The Move type for the `CoinType` being minted. `CoinType` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `designated_dealer_address` | `address` | The address of the Designated Dealer account being minted to. | /// | `mint_amount` | `u64` | The number of coins to be minted. | /// | `tier_index` | `u64` | The mint tier index to use for the Designated Dealer account. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. 
| /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_MINT_AMOUNT` | `mint_amount` is zero. | /// | `Errors::NOT_PUBLISHED` | `DesignatedDealer::EDEALER` | `DesignatedDealer::Dealer` or `DesignatedDealer::TierInfo<CoinType>` resource does not exist at `designated_dealer_address`. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_TIER_INDEX` | The `tier_index` is out of bounds. | /// | `Errors::INVALID_ARGUMENT` | `DesignatedDealer::EINVALID_AMOUNT_FOR_TIER` | `mint_amount` exceeds the maximum allowed amount for `tier_index`. | /// | `Errors::REQUIRES_CAPABILITY` | `Diem::EMINT_CAPABILITY` | `tc_account` does not have a `Diem::MintCapability<CoinType>` resource published under it. | /// | `Errors::INVALID_STATE` | `Diem::EMINTING_NOT_ALLOWED` | Minting is not currently allowed for `CoinType` coins. | /// | `Errors::LIMIT_EXCEEDED` | `DiemAccount::EDEPOSIT_EXCEEDS_LIMITS` | The depositing of the funds would exceed the `account`'s account limits. | /// /// # Related Scripts /// * `Script::create_designated_dealer` /// * `Script::peer_to_peer_with_metadata` /// * `Script::rotate_dual_attestation_info` pub fn encode_tiered_mint_script( coin_type: TypeTag, sliding_nonce: u64, designated_dealer_address: AccountAddress, mint_amount: u64, tier_index: u64, ) -> Script { Script::new( TIERED_MINT_CODE.to_vec(), vec![coin_type], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::Address(designated_dealer_address), TransactionArgument::U64(mint_amount), TransactionArgument::U64(tier_index), ], ) } /// # Summary /// Unfreezes the account at `address`. The sending account of this transaction must be the /// Treasury Compliance account. 
After the successful execution of this transaction, transactions /// may be sent from the previously frozen account, and coins may be sent and received. /// /// # Technical Description /// Sets the `AccountFreezing::FreezingBit` to `false` and emits a /// `AccountFreezing::UnFreezeAccountEvent`. The transaction sender must be the Treasury Compliance /// account. Note that this is a per-account property so unfreezing a Parent VASP will not affect /// the status of any of its child accounts and vice versa. /// /// ## Events /// Successful execution of this script will emit a `AccountFreezing::UnFreezeAccountEvent` with /// the `unfrozen_address` set to the `to_unfreeze_account`'s address. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `to_unfreeze_account` | `address` | The account address to be unfrozen. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | The sending account is not the Treasury Compliance account. 
| /// /// # Related Scripts /// * `Script::freeze_account` pub fn encode_unfreeze_account_script( sliding_nonce: u64, to_unfreeze_account: AccountAddress, ) -> Script { Script::new( UNFREEZE_ACCOUNT_CODE.to_vec(), vec![], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::Address(to_unfreeze_account), ], ) } /// # Summary /// Updates the Diem major version that is stored on-chain and is used by the VM. This /// transaction can only be sent from the Diem Root account. /// /// # Technical Description /// Updates the `DiemVersion` on-chain config and emits a `DiemConfig::NewEpochEvent` to trigger /// a reconfiguration of the system. The `major` version that is passed in must be strictly greater /// than the current major version held on-chain. The VM reads this information and can use it to /// preserve backwards compatibility with previous major versions of the VM. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `account` | `&signer` | Signer reference of the sending account. Must be the Diem Root account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `major` | `u64` | The `major` version of the VM to be used from this transaction on. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. 
| /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::EDIEM_ROOT` | `account` is not the Diem Root account. | /// | `Errors::INVALID_ARGUMENT` | `DiemVersion::EINVALID_MAJOR_VERSION_NUMBER` | `major` is less-than or equal to the current major version stored on-chain. | pub fn encode_update_diem_version_script(sliding_nonce: u64, major: u64) -> Script { Script::new( UPDATE_DIEM_VERSION_CODE.to_vec(), vec![], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::U64(major), ], ) } /// # Summary /// Update the dual attestation limit on-chain. Defined in terms of micro-XDX. The transaction can /// only be sent by the Treasury Compliance account. After this transaction all inter-VASP /// payments over this limit must be checked for dual attestation. /// /// # Technical Description /// Updates the `micro_xdx_limit` field of the `DualAttestation::Limit` resource published under /// `0xA550C18`. The amount is set in micro-XDX. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for this transaction. | /// | `new_micro_xdx_limit` | `u64` | The new dual attestation limit to be used on-chain. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. 
| /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// /// # Related Scripts /// * `Script::update_exchange_rate` /// * `Script::update_minting_ability` pub fn encode_update_dual_attestation_limit_script( sliding_nonce: u64, new_micro_xdx_limit: u64, ) -> Script { Script::new( UPDATE_DUAL_ATTESTATION_LIMIT_CODE.to_vec(), vec![], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::U64(new_micro_xdx_limit), ], ) } /// # Summary /// Update the rough on-chain exchange rate between a specified currency and XDX (as a conversion /// to micro-XDX). The transaction can only be sent by the Treasury Compliance account. After this /// transaction the updated exchange rate will be used for normalization of gas prices, and for /// dual attestation checking. /// /// # Technical Description /// Updates the on-chain exchange rate from the given `Currency` to micro-XDX. The exchange rate /// is given by `new_exchange_rate_numerator/new_exchange_rate_denominator`. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose exchange rate is being updated. `Currency` must be an already-registered currency on-chain. | /// | `tc_account` | `&signer` | The signer reference of the sending account of this transaction. Must be the Treasury Compliance account. | /// | `sliding_nonce` | `u64` | The `sliding_nonce` (see: `SlidingNonce`) to be used for the transaction. | /// | `new_exchange_rate_numerator` | `u64` | The numerator for the new to micro-XDX exchange rate for `Currency`. | /// | `new_exchange_rate_denominator` | `u64` | The denominator for the new to micro-XDX exchange rate for `Currency`. 
| /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::NOT_PUBLISHED` | `SlidingNonce::ESLIDING_NONCE` | A `SlidingNonce` resource is not published under `tc_account`. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_OLD` | The `sliding_nonce` is too old and it's impossible to determine if it's duplicated or not. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_TOO_NEW` | The `sliding_nonce` is too far in the future. | /// | `Errors::INVALID_ARGUMENT` | `SlidingNonce::ENONCE_ALREADY_RECORDED` | The `sliding_nonce` has been previously recorded. | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::REQUIRES_ROLE` | `Roles::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::EDENOMINATOR` | `new_exchange_rate_denominator` is zero. | /// | `Errors::INVALID_ARGUMENT` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. | /// | `Errors::LIMIT_EXCEEDED` | `FixedPoint32::ERATIO_OUT_OF_RANGE` | The quotient is unrepresentable as a `FixedPoint32`. | /// /// # Related Scripts /// * `Script::update_dual_attestation_limit` /// * `Script::update_minting_ability` pub fn encode_update_exchange_rate_script( currency: TypeTag, sliding_nonce: u64, new_exchange_rate_numerator: u64, new_exchange_rate_denominator: u64, ) -> Script { Script::new( UPDATE_EXCHANGE_RATE_CODE.to_vec(), vec![currency], vec![ TransactionArgument::U64(sliding_nonce), TransactionArgument::U64(new_exchange_rate_numerator), TransactionArgument::U64(new_exchange_rate_denominator), ], ) } /// # Summary /// Script to allow or disallow minting of new coins in a specified currency. This transaction can /// only be sent by the Treasury Compliance account. 
Turning minting off for a currency will have /// no effect on coins already in circulation, and coins may still be removed from the system. /// /// # Technical Description /// This transaction sets the `can_mint` field of the `Diem::CurrencyInfo<Currency>` resource /// published under `0xA550C18` to the value of `allow_minting`. Minting of coins is allowed if /// this field is set to `true` and minting of new coins in `Currency` is disallowed otherwise. /// This transaction needs to be sent by the Treasury Compliance account. /// /// # Parameters /// | Name | Type | Description | /// | ------ | ------ | ------------- | /// | `Currency` | Type | The Move type for the `Currency` whose minting ability is being updated. `Currency` must be an already-registered currency on-chain. | /// | `account` | `&signer` | Signer reference of the sending account. Must be the Diem Root account. | /// | `allow_minting` | `bool` | Whether to allow minting of new coins in `Currency`. | /// /// # Common Abort Conditions /// | Error Category | Error Reason | Description | /// | ---------------- | -------------- | ------------- | /// | `Errors::REQUIRES_ADDRESS` | `CoreAddresses::ETREASURY_COMPLIANCE` | `tc_account` is not the Treasury Compliance account. | /// | `Errors::NOT_PUBLISHED` | `Diem::ECURRENCY_INFO` | `Currency` is not a registered currency on-chain. 
|
///
/// # Related Scripts
/// * `Script::update_dual_attestation_limit`
/// * `Script::update_exchange_rate`
pub fn encode_update_minting_ability_script(currency: TypeTag, allow_minting: bool) -> Script {
    Script::new(
        UPDATE_MINTING_ABILITY_CODE.to_vec(),
        vec![currency],
        vec![TransactionArgument::Bool(allow_minting)],
    )
}

// The `decode_*_script_function` helpers below each inspect a
// `TransactionPayload::ScriptFunction` payload and, when it matches, rebuild the
// corresponding `ScriptFunctionCall` variant by cloning its type arguments and
// BCS-deserializing its transaction arguments. A missing or undecodable
// argument makes the whole helper return `None` (via `?` / `.ok()?`).

// Decodes an `AddCurrencyToAccount` call from its single currency type argument.
fn decode_add_currency_to_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::AddCurrencyToAccount {
            currency: script.ty_args().get(0)?.clone(),
        })
    } else {
        None
    }
}

// Decodes an `AddDiemIdDomain` call (args: address, domain).
fn decode_add_diem_id_domain_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::AddDiemIdDomain {
            address: bcs::from_bytes(script.args().get(0)?).ok()?,
            domain: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `AddRecoveryRotationCapability` call (arg: recovery_address).
fn decode_add_recovery_rotation_capability_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::AddRecoveryRotationCapability {
            recovery_address: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `AddValidatorAndReconfigure` call
// (args: sliding_nonce, validator_name, validator_address).
fn decode_add_validator_and_reconfigure_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::AddValidatorAndReconfigure {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            validator_name: bcs::from_bytes(script.args().get(1)?).ok()?,
            validator_address: bcs::from_bytes(script.args().get(2)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `AutopayCreateInstruction` call
// (args: uid, in_type, payee, end_epoch, value).
fn decode_autopay_create_instruction_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::AutopayCreateInstruction {
            uid: bcs::from_bytes(script.args().get(0)?).ok()?,
            in_type: bcs::from_bytes(script.args().get(1)?).ok()?,
            payee: bcs::from_bytes(script.args().get(2)?).ok()?,
            end_epoch: bcs::from_bytes(script.args().get(3)?).ok()?,
            value: bcs::from_bytes(script.args().get(4)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `AutopayDisable` call (no arguments).
fn decode_autopay_disable_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::AutopayDisable {})
    } else {
        None
    }
}

// Decodes an `AutopayEnable` call (no arguments).
fn decode_autopay_enable_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::AutopayEnable {})
    } else {
        None
    }
}

// Decodes a `BalanceTransfer` call (args: destination, unscaled_value).
fn decode_balance_transfer_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::BalanceTransfer {
            destination: bcs::from_bytes(script.args().get(0)?).ok()?,
            unscaled_value: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `BurnTxnFees` call from its single coin type argument.
fn decode_burn_txn_fees_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::BurnTxnFees {
            coin_type: script.ty_args().get(0)?.clone(),
        })
    } else {
        None
    }
}

// Decodes a `BurnWithAmount` call
// (type arg: token; args: sliding_nonce, preburn_address, amount).
fn decode_burn_with_amount_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::BurnWithAmount {
            token: script.ty_args().get(0)?.clone(),
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            preburn_address: bcs::from_bytes(script.args().get(1)?).ok()?,
            amount: bcs::from_bytes(script.args().get(2)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CancelBurnWithAmount` call
// (type arg: token; args: preburn_address, amount).
fn decode_cancel_burn_with_amount_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CancelBurnWithAmount {
            token: script.ty_args().get(0)?.clone(),
            preburn_address: bcs::from_bytes(script.args().get(0)?).ok()?,
            amount: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CommunityTransfer` call (args: destination, unscaled_value, memo).
fn decode_community_transfer_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CommunityTransfer {
            destination: bcs::from_bytes(script.args().get(0)?).ok()?,
            unscaled_value: bcs::from_bytes(script.args().get(1)?).ok()?,
            memo: bcs::from_bytes(script.args().get(2)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateAccUser` call (args: challenge, solution, difficulty, security).
fn decode_create_acc_user_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateAccUser {
            challenge: bcs::from_bytes(script.args().get(0)?).ok()?,
            solution: bcs::from_bytes(script.args().get(1)?).ok()?,
            difficulty: bcs::from_bytes(script.args().get(2)?).ok()?,
            security: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateAccVal` call (args 0-10 in order: proof fields followed by
// the owner's human name and the operator's account/key/network configuration).
fn decode_create_acc_val_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateAccVal {
            challenge: bcs::from_bytes(script.args().get(0)?).ok()?,
            solution: bcs::from_bytes(script.args().get(1)?).ok()?,
            difficulty: bcs::from_bytes(script.args().get(2)?).ok()?,
            security: bcs::from_bytes(script.args().get(3)?).ok()?,
            ow_human_name: bcs::from_bytes(script.args().get(4)?).ok()?,
            op_address: bcs::from_bytes(script.args().get(5)?).ok()?,
            op_auth_key_prefix: bcs::from_bytes(script.args().get(6)?).ok()?,
            op_consensus_pubkey: bcs::from_bytes(script.args().get(7)?).ok()?,
            op_validator_network_addresses: bcs::from_bytes(script.args().get(8)?).ok()?,
            op_fullnode_network_addresses: bcs::from_bytes(script.args().get(9)?).ok()?,
            op_human_name:
bcs::from_bytes(script.args().get(10)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateChildVaspAccount` call (type arg: coin_type; args:
// child_address, auth_key_prefix, add_all_currencies, child_initial_balance).
fn decode_create_child_vasp_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateChildVaspAccount {
            coin_type: script.ty_args().get(0)?.clone(),
            child_address: bcs::from_bytes(script.args().get(0)?).ok()?,
            auth_key_prefix: bcs::from_bytes(script.args().get(1)?).ok()?,
            add_all_currencies: bcs::from_bytes(script.args().get(2)?).ok()?,
            child_initial_balance: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateDesignatedDealer` call (type arg: currency; args:
// sliding_nonce, addr, auth_key_prefix, human_name, add_all_currencies).
fn decode_create_designated_dealer_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateDesignatedDealer {
            currency: script.ty_args().get(0)?.clone(),
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            addr: bcs::from_bytes(script.args().get(1)?).ok()?,
            auth_key_prefix: bcs::from_bytes(script.args().get(2)?).ok()?,
            human_name: bcs::from_bytes(script.args().get(3)?).ok()?,
            add_all_currencies: bcs::from_bytes(script.args().get(4)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateDiemIdDomains` call (no arguments).
fn decode_create_diem_id_domains_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::CreateDiemIdDomains {})
    } else {
        None
    }
}

// Decodes a `CreateParentVaspAccount` call (type arg: coin_type; args:
// sliding_nonce, new_account_address, auth_key_prefix, human_name,
// add_all_currencies).
fn decode_create_parent_vasp_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateParentVaspAccount {
            coin_type: script.ty_args().get(0)?.clone(),
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            new_account_address: bcs::from_bytes(script.args().get(1)?).ok()?,
            auth_key_prefix: bcs::from_bytes(script.args().get(2)?).ok()?,
            human_name: bcs::from_bytes(script.args().get(3)?).ok()?,
            add_all_currencies: bcs::from_bytes(script.args().get(4)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateRecoveryAddress` call (no arguments).
fn decode_create_recovery_address_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::CreateRecoveryAddress {})
    } else {
        None
    }
}

// Decodes a `CreateUserByCoinTx` call (args: account, authkey_prefix, unscaled_value).
fn decode_create_user_by_coin_tx_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateUserByCoinTx {
            account: bcs::from_bytes(script.args().get(0)?).ok()?,
            authkey_prefix: bcs::from_bytes(script.args().get(1)?).ok()?,
            unscaled_value: bcs::from_bytes(script.args().get(2)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateValidatorAccount` call
// (args: sliding_nonce, new_account_address, auth_key_prefix, human_name).
fn decode_create_validator_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateValidatorAccount {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            new_account_address: bcs::from_bytes(script.args().get(1)?).ok()?,
            auth_key_prefix: bcs::from_bytes(script.args().get(2)?).ok()?,
            human_name: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `CreateValidatorOperatorAccount` call
// (args: sliding_nonce, new_account_address, auth_key_prefix, human_name).
fn decode_create_validator_operator_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::CreateValidatorOperatorAccount {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            new_account_address: bcs::from_bytes(script.args().get(1)?).ok()?,
            auth_key_prefix: bcs::from_bytes(script.args().get(2)?).ok()?,
            human_name: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `DemoE2e` call (arg: world).
fn decode_demo_e2e_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::DemoE2e {
            world: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `FreezeAccount` call (args: sliding_nonce, to_freeze_account).
fn decode_freeze_account_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::FreezeAccount {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            to_freeze_account: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `InitializeDiemConsensusConfig` call (arg: sliding_nonce).
fn decode_initialize_diem_consensus_config_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::InitializeDiemConsensusConfig {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `Join` call (no arguments).
fn decode_join_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::Join {})
    } else {
        None
    }
}

// Decodes a `Leave` call (no arguments).
fn decode_leave_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::Leave {})
    } else {
        None
    }
}

// Decodes a `MinerstateCommit` call (args: challenge, solution, difficulty, security).
fn decode_minerstate_commit_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::MinerstateCommit {
            challenge: bcs::from_bytes(script.args().get(0)?).ok()?,
            solution: bcs::from_bytes(script.args().get(1)?).ok()?,
            difficulty: bcs::from_bytes(script.args().get(2)?).ok()?,
            security: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `MinerstateCommitByOperator` call
// (args: owner_address, challenge, solution, difficulty, security).
fn decode_minerstate_commit_by_operator_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::MinerstateCommitByOperator {
            owner_address: bcs::from_bytes(script.args().get(0)?).ok()?,
            challenge: bcs::from_bytes(script.args().get(1)?).ok()?,
            solution:
bcs::from_bytes(script.args().get(2)?).ok()?,
            difficulty: bcs::from_bytes(script.args().get(3)?).ok()?,
            security: bcs::from_bytes(script.args().get(4)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `MinerstateHelper` call (no arguments).
fn decode_minerstate_helper_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::MinerstateHelper {})
    } else {
        None
    }
}

// Decodes an `OlDelegateVote` call (arg: dest).
fn decode_ol_delegate_vote_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::OlDelegateVote {
            dest: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `OlEnableDelegation` call (no arguments).
fn decode_ol_enable_delegation_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::OlEnableDelegation {})
    } else {
        None
    }
}

// Decodes an `OlOracleTx` call (args: id, data).
fn decode_ol_oracle_tx_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::OlOracleTx {
            id: bcs::from_bytes(script.args().get(0)?).ok()?,
            data: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `OlReconfigBulkUpdateSetup` call (args: alice, bob, carol, sha, ram).
fn decode_ol_reconfig_bulk_update_setup_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::OlReconfigBulkUpdateSetup {
            alice: bcs::from_bytes(script.args().get(0)?).ok()?,
            bob: bcs::from_bytes(script.args().get(1)?).ok()?,
            carol: bcs::from_bytes(script.args().get(2)?).ok()?,
            sha: bcs::from_bytes(script.args().get(3)?).ok()?,
            ram: bcs::from_bytes(script.args().get(4)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes an `OlRemoveDelegation` call (no arguments).
fn decode_ol_remove_delegation_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(_script) = payload {
        Some(ScriptFunctionCall::OlRemoveDelegation {})
    } else {
        None
    }
}

// Decodes a `PeerToPeerWithMetadata` call (type arg: currency; args: payee,
// amount, metadata, metadata_signature).
fn decode_peer_to_peer_with_metadata_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::PeerToPeerWithMetadata {
            currency: script.ty_args().get(0)?.clone(),
            payee: bcs::from_bytes(script.args().get(0)?).ok()?,
            amount: bcs::from_bytes(script.args().get(1)?).ok()?,
            metadata: bcs::from_bytes(script.args().get(2)?).ok()?,
            metadata_signature: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `Preburn` call (type arg: token; arg: amount).
fn decode_preburn_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::Preburn {
            token: script.ty_args().get(0)?.clone(),
            amount: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `PublishSharedEd25519PublicKey` call (arg: public_key).
fn decode_publish_shared_ed25519_public_key_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::PublishSharedEd25519PublicKey {
            public_key: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RegisterValidatorConfig` call (args: validator_account,
// consensus_pubkey, validator_network_addresses, fullnode_network_addresses).
fn decode_register_validator_config_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RegisterValidatorConfig {
            validator_account: bcs::from_bytes(script.args().get(0)?).ok()?,
            consensus_pubkey: bcs::from_bytes(script.args().get(1)?).ok()?,
            validator_network_addresses: bcs::from_bytes(script.args().get(2)?).ok()?,
            fullnode_network_addresses: bcs::from_bytes(script.args().get(3)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RemoveDiemIdDomain` call (args: address, domain).
fn decode_remove_diem_id_domain_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RemoveDiemIdDomain {
            address: bcs::from_bytes(script.args().get(0)?).ok()?,
            domain: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RemoveValidatorAndReconfigure` call
// (args: sliding_nonce, validator_name, validator_address).
fn decode_remove_validator_and_reconfigure_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RemoveValidatorAndReconfigure {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            validator_name: bcs::from_bytes(script.args().get(1)?).ok()?,
            validator_address: bcs::from_bytes(script.args().get(2)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RotateAuthenticationKey` call (arg: new_key).
fn decode_rotate_authentication_key_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RotateAuthenticationKey {
            new_key: bcs::from_bytes(script.args().get(0)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RotateAuthenticationKeyWithNonce` call (args: sliding_nonce, new_key).
fn decode_rotate_authentication_key_with_nonce_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RotateAuthenticationKeyWithNonce {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            new_key: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RotateAuthenticationKeyWithNonceAdmin` call (args: sliding_nonce, new_key).
fn decode_rotate_authentication_key_with_nonce_admin_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(ScriptFunctionCall::RotateAuthenticationKeyWithNonceAdmin {
            sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?,
            new_key: bcs::from_bytes(script.args().get(1)?).ok()?,
        })
    } else {
        None
    }
}

// Decodes a `RotateAuthenticationKeyWithRecoveryAddress` call (arg: recovery_address, ...).
fn decode_rotate_authentication_key_with_recovery_address_script_function(
    payload: &TransactionPayload,
) -> Option<ScriptFunctionCall> {
    if let TransactionPayload::ScriptFunction(script) = payload {
        Some(
            ScriptFunctionCall::RotateAuthenticationKeyWithRecoveryAddress {
                recovery_address: bcs::from_bytes(script.args().get(0)?).ok()?,
to_recover: bcs::from_bytes(script.args().get(1)?).ok()?, new_key: bcs::from_bytes(script.args().get(2)?).ok()?, }, ) } else { None } } fn decode_rotate_dual_attestation_info_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::RotateDualAttestationInfo { new_url: bcs::from_bytes(script.args().get(0)?).ok()?, new_key: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_rotate_shared_ed25519_public_key_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::RotateSharedEd25519PublicKey { public_key: bcs::from_bytes(script.args().get(0)?).ok()?, }) } else { None } } fn decode_set_gas_constants_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::SetGasConstants { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, global_memory_per_byte_cost: bcs::from_bytes(script.args().get(1)?).ok()?, global_memory_per_byte_write_cost: bcs::from_bytes(script.args().get(2)?).ok()?, min_transaction_gas_units: bcs::from_bytes(script.args().get(3)?).ok()?, large_transaction_cutoff: bcs::from_bytes(script.args().get(4)?).ok()?, intrinsic_gas_per_byte: bcs::from_bytes(script.args().get(5)?).ok()?, maximum_number_of_gas_units: bcs::from_bytes(script.args().get(6)?).ok()?, min_price_per_gas_unit: bcs::from_bytes(script.args().get(7)?).ok()?, max_price_per_gas_unit: bcs::from_bytes(script.args().get(8)?).ok()?, max_transaction_size_in_bytes: bcs::from_bytes(script.args().get(9)?).ok()?, gas_unit_scaling_factor: bcs::from_bytes(script.args().get(10)?).ok()?, default_account_size: bcs::from_bytes(script.args().get(11)?).ok()?, }) } else { None } } fn decode_set_validator_config_and_reconfigure_script_function( payload: 
&TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::SetValidatorConfigAndReconfigure { validator_account: bcs::from_bytes(script.args().get(0)?).ok()?, consensus_pubkey: bcs::from_bytes(script.args().get(1)?).ok()?, validator_network_addresses: bcs::from_bytes(script.args().get(2)?).ok()?, fullnode_network_addresses: bcs::from_bytes(script.args().get(3)?).ok()?, }) } else { None } } fn decode_set_validator_operator_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::SetValidatorOperator { operator_name: bcs::from_bytes(script.args().get(0)?).ok()?, operator_account: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_set_validator_operator_with_nonce_admin_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::SetValidatorOperatorWithNonceAdmin { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, operator_name: bcs::from_bytes(script.args().get(1)?).ok()?, operator_account: bcs::from_bytes(script.args().get(2)?).ok()?, }) } else { None } } fn decode_set_wallet_type_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::SetWalletType { type_of: bcs::from_bytes(script.args().get(0)?).ok()?, }) } else { None } } fn decode_tiered_mint_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::TieredMint { coin_type: script.ty_args().get(0)?.clone(), sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, designated_dealer_address: bcs::from_bytes(script.args().get(1)?).ok()?, mint_amount: 
bcs::from_bytes(script.args().get(2)?).ok()?, tier_index: bcs::from_bytes(script.args().get(3)?).ok()?, }) } else { None } } fn decode_unfreeze_account_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UnfreezeAccount { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, to_unfreeze_account: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_update_diem_consensus_config_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UpdateDiemConsensusConfig { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, config: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_update_diem_version_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UpdateDiemVersion { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, major: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_update_dual_attestation_limit_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UpdateDualAttestationLimit { sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, new_micro_xdx_limit: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } fn decode_update_exchange_rate_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UpdateExchangeRate { currency: script.ty_args().get(0)?.clone(), sliding_nonce: bcs::from_bytes(script.args().get(0)?).ok()?, new_exchange_rate_numerator: bcs::from_bytes(script.args().get(1)?).ok()?, 
new_exchange_rate_denominator: bcs::from_bytes(script.args().get(2)?).ok()?, }) } else { None } } fn decode_update_minting_ability_script_function( payload: &TransactionPayload, ) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(script) = payload { Some(ScriptFunctionCall::UpdateMintingAbility { currency: script.ty_args().get(0)?.clone(), allow_minting: bcs::from_bytes(script.args().get(0)?).ok()?, }) } else { None } } fn decode_val_add_self_script_function(payload: &TransactionPayload) -> Option<ScriptFunctionCall> { if let TransactionPayload::ScriptFunction(_script) = payload { Some(ScriptFunctionCall::ValAddSelf {}) } else { None } } fn decode_add_currency_to_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::AddCurrencyToAccount { currency: script.ty_args().get(0)?.clone(), }) } fn decode_add_recovery_rotation_capability_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::AddRecoveryRotationCapability { recovery_address: decode_address_argument(script.args().get(0)?.clone())?, }) } fn decode_add_validator_and_reconfigure_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::AddValidatorAndReconfigure { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, validator_name: decode_u8vector_argument(script.args().get(1)?.clone())?, validator_address: decode_address_argument(script.args().get(2)?.clone())?, }) } fn decode_burn_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::Burn { token: script.ty_args().get(0)?.clone(), sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, preburn_address: decode_address_argument(script.args().get(1)?.clone())?, }) } fn decode_burn_txn_fees_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::BurnTxnFees { coin_type: script.ty_args().get(0)?.clone(), }) } fn decode_cancel_burn_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CancelBurn { token: script.ty_args().get(0)?.clone(), 
preburn_address: decode_address_argument(script.args().get(0)?.clone())?, }) } fn decode_create_child_vasp_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateChildVaspAccount { coin_type: script.ty_args().get(0)?.clone(), child_address: decode_address_argument(script.args().get(0)?.clone())?, auth_key_prefix: decode_u8vector_argument(script.args().get(1)?.clone())?, add_all_currencies: decode_bool_argument(script.args().get(2)?.clone())?, child_initial_balance: decode_u64_argument(script.args().get(3)?.clone())?, }) } fn decode_create_designated_dealer_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateDesignatedDealer { currency: script.ty_args().get(0)?.clone(), sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, addr: decode_address_argument(script.args().get(1)?.clone())?, auth_key_prefix: decode_u8vector_argument(script.args().get(2)?.clone())?, human_name: decode_u8vector_argument(script.args().get(3)?.clone())?, add_all_currencies: decode_bool_argument(script.args().get(4)?.clone())?, }) } fn decode_create_parent_vasp_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateParentVaspAccount { coin_type: script.ty_args().get(0)?.clone(), sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_account_address: decode_address_argument(script.args().get(1)?.clone())?, auth_key_prefix: decode_u8vector_argument(script.args().get(2)?.clone())?, human_name: decode_u8vector_argument(script.args().get(3)?.clone())?, add_all_currencies: decode_bool_argument(script.args().get(4)?.clone())?, }) } fn decode_create_recovery_address_script(_script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateRecoveryAddress {}) } fn decode_create_validator_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateValidatorAccount { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_account_address: 
decode_address_argument(script.args().get(1)?.clone())?, auth_key_prefix: decode_u8vector_argument(script.args().get(2)?.clone())?, human_name: decode_u8vector_argument(script.args().get(3)?.clone())?, }) } fn decode_create_validator_operator_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::CreateValidatorOperatorAccount { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_account_address: decode_address_argument(script.args().get(1)?.clone())?, auth_key_prefix: decode_u8vector_argument(script.args().get(2)?.clone())?, human_name: decode_u8vector_argument(script.args().get(3)?.clone())?, }) } fn decode_freeze_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::FreezeAccount { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, to_freeze_account: decode_address_argument(script.args().get(1)?.clone())?, }) } fn decode_peer_to_peer_with_metadata_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::PeerToPeerWithMetadata { currency: script.ty_args().get(0)?.clone(), payee: decode_address_argument(script.args().get(0)?.clone())?, amount: decode_u64_argument(script.args().get(1)?.clone())?, metadata: decode_u8vector_argument(script.args().get(2)?.clone())?, metadata_signature: decode_u8vector_argument(script.args().get(3)?.clone())?, }) } fn decode_preburn_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::Preburn { token: script.ty_args().get(0)?.clone(), amount: decode_u64_argument(script.args().get(0)?.clone())?, }) } fn decode_publish_shared_ed25519_public_key_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::PublishSharedEd25519PublicKey { public_key: decode_u8vector_argument(script.args().get(0)?.clone())?, }) } fn decode_register_validator_config_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RegisterValidatorConfig { validator_account: decode_address_argument(script.args().get(0)?.clone())?, consensus_pubkey: 
decode_u8vector_argument(script.args().get(1)?.clone())?, validator_network_addresses: decode_u8vector_argument(script.args().get(2)?.clone())?, fullnode_network_addresses: decode_u8vector_argument(script.args().get(3)?.clone())?, }) } fn decode_remove_validator_and_reconfigure_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RemoveValidatorAndReconfigure { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, validator_name: decode_u8vector_argument(script.args().get(1)?.clone())?, validator_address: decode_address_argument(script.args().get(2)?.clone())?, }) } fn decode_rotate_authentication_key_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RotateAuthenticationKey { new_key: decode_u8vector_argument(script.args().get(0)?.clone())?, }) } fn decode_rotate_authentication_key_with_nonce_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RotateAuthenticationKeyWithNonce { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_key: decode_u8vector_argument(script.args().get(1)?.clone())?, }) } fn decode_rotate_authentication_key_with_nonce_admin_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RotateAuthenticationKeyWithNonceAdmin { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_key: decode_u8vector_argument(script.args().get(1)?.clone())?, }) } fn decode_rotate_authentication_key_with_recovery_address_script( script: &Script, ) -> Option<ScriptCall> { Some(ScriptCall::RotateAuthenticationKeyWithRecoveryAddress { recovery_address: decode_address_argument(script.args().get(0)?.clone())?, to_recover: decode_address_argument(script.args().get(1)?.clone())?, new_key: decode_u8vector_argument(script.args().get(2)?.clone())?, }) } fn decode_rotate_dual_attestation_info_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RotateDualAttestationInfo { new_url: decode_u8vector_argument(script.args().get(0)?.clone())?, new_key: 
decode_u8vector_argument(script.args().get(1)?.clone())?, }) } fn decode_rotate_shared_ed25519_public_key_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::RotateSharedEd25519PublicKey { public_key: decode_u8vector_argument(script.args().get(0)?.clone())?, }) } fn decode_set_validator_config_and_reconfigure_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::SetValidatorConfigAndReconfigure { validator_account: decode_address_argument(script.args().get(0)?.clone())?, consensus_pubkey: decode_u8vector_argument(script.args().get(1)?.clone())?, validator_network_addresses: decode_u8vector_argument(script.args().get(2)?.clone())?, fullnode_network_addresses: decode_u8vector_argument(script.args().get(3)?.clone())?, }) } fn decode_set_validator_operator_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::SetValidatorOperator { operator_name: decode_u8vector_argument(script.args().get(0)?.clone())?, operator_account: decode_address_argument(script.args().get(1)?.clone())?, }) } fn decode_set_validator_operator_with_nonce_admin_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::SetValidatorOperatorWithNonceAdmin { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, operator_name: decode_u8vector_argument(script.args().get(1)?.clone())?, operator_account: decode_address_argument(script.args().get(2)?.clone())?, }) } fn decode_tiered_mint_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::TieredMint { coin_type: script.ty_args().get(0)?.clone(), sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, designated_dealer_address: decode_address_argument(script.args().get(1)?.clone())?, mint_amount: decode_u64_argument(script.args().get(2)?.clone())?, tier_index: decode_u64_argument(script.args().get(3)?.clone())?, }) } fn decode_unfreeze_account_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::UnfreezeAccount { sliding_nonce: 
decode_u64_argument(script.args().get(0)?.clone())?, to_unfreeze_account: decode_address_argument(script.args().get(1)?.clone())?, }) } fn decode_update_diem_version_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::UpdateDiemVersion { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, major: decode_u64_argument(script.args().get(1)?.clone())?, }) } fn decode_update_dual_attestation_limit_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::UpdateDualAttestationLimit { sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_micro_xdx_limit: decode_u64_argument(script.args().get(1)?.clone())?, }) } fn decode_update_exchange_rate_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::UpdateExchangeRate { currency: script.ty_args().get(0)?.clone(), sliding_nonce: decode_u64_argument(script.args().get(0)?.clone())?, new_exchange_rate_numerator: decode_u64_argument(script.args().get(1)?.clone())?, new_exchange_rate_denominator: decode_u64_argument(script.args().get(2)?.clone())?, }) } fn decode_update_minting_ability_script(script: &Script) -> Option<ScriptCall> { Some(ScriptCall::UpdateMintingAbility { currency: script.ty_args().get(0)?.clone(), allow_minting: decode_bool_argument(script.args().get(0)?.clone())?, }) } type TransactionScriptDecoderMap = std::collections::HashMap< Vec<u8>, Box<dyn Fn(&Script) -> Option<ScriptCall> + std::marker::Sync + std::marker::Send>, >; static TRANSACTION_SCRIPT_DECODER_MAP: once_cell::sync::Lazy<TransactionScriptDecoderMap> = once_cell::sync::Lazy::new(|| { let mut map: TransactionScriptDecoderMap = std::collections::HashMap::new(); map.insert( ADD_CURRENCY_TO_ACCOUNT_CODE.to_vec(), Box::new(decode_add_currency_to_account_script), ); map.insert( ADD_RECOVERY_ROTATION_CAPABILITY_CODE.to_vec(), Box::new(decode_add_recovery_rotation_capability_script), ); map.insert( ADD_VALIDATOR_AND_RECONFIGURE_CODE.to_vec(), Box::new(decode_add_validator_and_reconfigure_script), 
); map.insert(BURN_CODE.to_vec(), Box::new(decode_burn_script)); map.insert( BURN_TXN_FEES_CODE.to_vec(), Box::new(decode_burn_txn_fees_script), ); map.insert( CANCEL_BURN_CODE.to_vec(), Box::new(decode_cancel_burn_script), ); map.insert( CREATE_CHILD_VASP_ACCOUNT_CODE.to_vec(), Box::new(decode_create_child_vasp_account_script), ); map.insert( CREATE_DESIGNATED_DEALER_CODE.to_vec(), Box::new(decode_create_designated_dealer_script), ); map.insert( CREATE_PARENT_VASP_ACCOUNT_CODE.to_vec(), Box::new(decode_create_parent_vasp_account_script), ); map.insert( CREATE_RECOVERY_ADDRESS_CODE.to_vec(), Box::new(decode_create_recovery_address_script), ); map.insert( CREATE_VALIDATOR_ACCOUNT_CODE.to_vec(), Box::new(decode_create_validator_account_script), ); map.insert( CREATE_VALIDATOR_OPERATOR_ACCOUNT_CODE.to_vec(), Box::new(decode_create_validator_operator_account_script), ); map.insert( FREEZE_ACCOUNT_CODE.to_vec(), Box::new(decode_freeze_account_script), ); map.insert( PEER_TO_PEER_WITH_METADATA_CODE.to_vec(), Box::new(decode_peer_to_peer_with_metadata_script), ); map.insert(PREBURN_CODE.to_vec(), Box::new(decode_preburn_script)); map.insert( PUBLISH_SHARED_ED25519_PUBLIC_KEY_CODE.to_vec(), Box::new(decode_publish_shared_ed25519_public_key_script), ); map.insert( REGISTER_VALIDATOR_CONFIG_CODE.to_vec(), Box::new(decode_register_validator_config_script), ); map.insert( REMOVE_VALIDATOR_AND_RECONFIGURE_CODE.to_vec(), Box::new(decode_remove_validator_and_reconfigure_script), ); map.insert( ROTATE_AUTHENTICATION_KEY_CODE.to_vec(), Box::new(decode_rotate_authentication_key_script), ); map.insert( ROTATE_AUTHENTICATION_KEY_WITH_NONCE_CODE.to_vec(), Box::new(decode_rotate_authentication_key_with_nonce_script), ); map.insert( ROTATE_AUTHENTICATION_KEY_WITH_NONCE_ADMIN_CODE.to_vec(), Box::new(decode_rotate_authentication_key_with_nonce_admin_script), ); map.insert( ROTATE_AUTHENTICATION_KEY_WITH_RECOVERY_ADDRESS_CODE.to_vec(), 
Box::new(decode_rotate_authentication_key_with_recovery_address_script), ); map.insert( ROTATE_DUAL_ATTESTATION_INFO_CODE.to_vec(), Box::new(decode_rotate_dual_attestation_info_script), ); map.insert( ROTATE_SHARED_ED25519_PUBLIC_KEY_CODE.to_vec(), Box::new(decode_rotate_shared_ed25519_public_key_script), ); map.insert( SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE.to_vec(), Box::new(decode_set_validator_config_and_reconfigure_script), ); map.insert( SET_VALIDATOR_OPERATOR_CODE.to_vec(), Box::new(decode_set_validator_operator_script), ); map.insert( SET_VALIDATOR_OPERATOR_WITH_NONCE_ADMIN_CODE.to_vec(), Box::new(decode_set_validator_operator_with_nonce_admin_script), ); map.insert( TIERED_MINT_CODE.to_vec(), Box::new(decode_tiered_mint_script), ); map.insert( UNFREEZE_ACCOUNT_CODE.to_vec(), Box::new(decode_unfreeze_account_script), ); map.insert( UPDATE_DIEM_VERSION_CODE.to_vec(), Box::new(decode_update_diem_version_script), ); map.insert( UPDATE_DUAL_ATTESTATION_LIMIT_CODE.to_vec(), Box::new(decode_update_dual_attestation_limit_script), ); map.insert( UPDATE_EXCHANGE_RATE_CODE.to_vec(), Box::new(decode_update_exchange_rate_script), ); map.insert( UPDATE_MINTING_ABILITY_CODE.to_vec(), Box::new(decode_update_minting_ability_script), ); map }); type ScriptFunctionDecoderMap = std::collections::HashMap< String, Box< dyn Fn(&TransactionPayload) -> Option<ScriptFunctionCall> + std::marker::Sync + std::marker::Send, >, >; static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazy<ScriptFunctionDecoderMap> = once_cell::sync::Lazy::new(|| { let mut map: ScriptFunctionDecoderMap = std::collections::HashMap::new(); map.insert( "AccountAdministrationScriptsadd_currency_to_account".to_string(), Box::new(decode_add_currency_to_account_script_function), ); map.insert( "TreasuryComplianceScriptsadd_diem_id_domain".to_string(), Box::new(decode_add_diem_id_domain_script_function), ); map.insert( "AccountAdministrationScriptsadd_recovery_rotation_capability".to_string(), 
Box::new(decode_add_recovery_rotation_capability_script_function), ); map.insert( "ValidatorAdministrationScriptsadd_validator_and_reconfigure".to_string(), Box::new(decode_add_validator_and_reconfigure_script_function), ); map.insert( "AutoPayScriptsautopay_create_instruction".to_string(), Box::new(decode_autopay_create_instruction_script_function), ); map.insert( "AutoPayScriptsautopay_disable".to_string(), Box::new(decode_autopay_disable_script_function), ); map.insert( "AutoPayScriptsautopay_enable".to_string(), Box::new(decode_autopay_enable_script_function), ); map.insert( "TransferScriptsbalance_transfer".to_string(), Box::new(decode_balance_transfer_script_function), ); map.insert( "TreasuryComplianceScriptsburn_txn_fees".to_string(), Box::new(decode_burn_txn_fees_script_function), ); map.insert( "TreasuryComplianceScriptsburn_with_amount".to_string(), Box::new(decode_burn_with_amount_script_function), ); map.insert( "TreasuryComplianceScriptscancel_burn_with_amount".to_string(), Box::new(decode_cancel_burn_with_amount_script_function), ); map.insert( "TransferScriptscommunity_transfer".to_string(), Box::new(decode_community_transfer_script_function), ); map.insert( "AccountScriptscreate_acc_user".to_string(), Box::new(decode_create_acc_user_script_function), ); map.insert( "AccountScriptscreate_acc_val".to_string(), Box::new(decode_create_acc_val_script_function), ); map.insert( "AccountCreationScriptscreate_child_vasp_account".to_string(), Box::new(decode_create_child_vasp_account_script_function), ); map.insert( "AccountCreationScriptscreate_designated_dealer".to_string(), Box::new(decode_create_designated_dealer_script_function), ); map.insert( "AccountAdministrationScriptscreate_diem_id_domains".to_string(), Box::new(decode_create_diem_id_domains_script_function), ); map.insert( "AccountCreationScriptscreate_parent_vasp_account".to_string(), Box::new(decode_create_parent_vasp_account_script_function), ); map.insert( 
"AccountAdministrationScriptscreate_recovery_address".to_string(), Box::new(decode_create_recovery_address_script_function), ); map.insert( "AccountScriptscreate_user_by_coin_tx".to_string(), Box::new(decode_create_user_by_coin_tx_script_function), ); map.insert( "AccountCreationScriptscreate_validator_account".to_string(), Box::new(decode_create_validator_account_script_function), ); map.insert( "AccountCreationScriptscreate_validator_operator_account".to_string(), Box::new(decode_create_validator_operator_account_script_function), ); map.insert( "DemoScriptsdemo_e2e".to_string(), Box::new(decode_demo_e2e_script_function), ); map.insert( "TreasuryComplianceScriptsfreeze_account".to_string(), Box::new(decode_freeze_account_script_function), ); map.insert( "SystemAdministrationScriptsinitialize_diem_consensus_config".to_string(), Box::new(decode_initialize_diem_consensus_config_script_function), ); map.insert( "ValidatorScriptsjoin".to_string(), Box::new(decode_join_script_function), ); map.insert( "ValidatorScriptsleave".to_string(), Box::new(decode_leave_script_function), ); map.insert( "TowerStateScriptsminerstate_commit".to_string(), Box::new(decode_minerstate_commit_script_function), ); map.insert( "TowerStateScriptsminerstate_commit_by_operator".to_string(), Box::new(decode_minerstate_commit_by_operator_script_function), ); map.insert( "TowerStateScriptsminerstate_helper".to_string(), Box::new(decode_minerstate_helper_script_function), ); map.insert( "OracleScriptsol_delegate_vote".to_string(), Box::new(decode_ol_delegate_vote_script_function), ); map.insert( "OracleScriptsol_enable_delegation".to_string(), Box::new(decode_ol_enable_delegation_script_function), ); map.insert( "OracleScriptsol_oracle_tx".to_string(), Box::new(decode_ol_oracle_tx_script_function), ); map.insert( "ValidatorScriptsol_reconfig_bulk_update_setup".to_string(), Box::new(decode_ol_reconfig_bulk_update_setup_script_function), ); map.insert( 
"OracleScriptsol_remove_delegation".to_string(), Box::new(decode_ol_remove_delegation_script_function), ); map.insert( "PaymentScriptspeer_to_peer_with_metadata".to_string(), Box::new(decode_peer_to_peer_with_metadata_script_function), ); map.insert( "TreasuryComplianceScriptspreburn".to_string(), Box::new(decode_preburn_script_function), ); map.insert( "AccountAdministrationScriptspublish_shared_ed25519_public_key".to_string(), Box::new(decode_publish_shared_ed25519_public_key_script_function), ); map.insert( "ValidatorAdministrationScriptsregister_validator_config".to_string(), Box::new(decode_register_validator_config_script_function), ); map.insert( "TreasuryComplianceScriptsremove_diem_id_domain".to_string(), Box::new(decode_remove_diem_id_domain_script_function), ); map.insert( "ValidatorAdministrationScriptsremove_validator_and_reconfigure".to_string(), Box::new(decode_remove_validator_and_reconfigure_script_function), ); map.insert( "AccountAdministrationScriptsrotate_authentication_key".to_string(), Box::new(decode_rotate_authentication_key_script_function), ); map.insert( "AccountAdministrationScriptsrotate_authentication_key_with_nonce".to_string(), Box::new(decode_rotate_authentication_key_with_nonce_script_function), ); map.insert( "AccountAdministrationScriptsrotate_authentication_key_with_nonce_admin".to_string(), Box::new(decode_rotate_authentication_key_with_nonce_admin_script_function), ); map.insert( "AccountAdministrationScriptsrotate_authentication_key_with_recovery_address" .to_string(), Box::new(decode_rotate_authentication_key_with_recovery_address_script_function), ); map.insert( "AccountAdministrationScriptsrotate_dual_attestation_info".to_string(), Box::new(decode_rotate_dual_attestation_info_script_function), ); map.insert( "AccountAdministrationScriptsrotate_shared_ed25519_public_key".to_string(), Box::new(decode_rotate_shared_ed25519_public_key_script_function), ); map.insert( "SystemAdministrationScriptsset_gas_constants".to_string(), 
Box::new(decode_set_gas_constants_script_function), ); map.insert( "ValidatorAdministrationScriptsset_validator_config_and_reconfigure".to_string(), Box::new(decode_set_validator_config_and_reconfigure_script_function), ); map.insert( "ValidatorAdministrationScriptsset_validator_operator".to_string(), Box::new(decode_set_validator_operator_script_function), ); map.insert( "ValidatorAdministrationScriptsset_validator_operator_with_nonce_admin".to_string(), Box::new(decode_set_validator_operator_with_nonce_admin_script_function), ); map.insert( "WalletScriptsset_wallet_type".to_string(), Box::new(decode_set_wallet_type_script_function), ); map.insert( "TreasuryComplianceScriptstiered_mint".to_string(), Box::new(decode_tiered_mint_script_function), ); map.insert( "TreasuryComplianceScriptsunfreeze_account".to_string(), Box::new(decode_unfreeze_account_script_function), ); map.insert( "SystemAdministrationScriptsupdate_diem_consensus_config".to_string(), Box::new(decode_update_diem_consensus_config_script_function), ); map.insert( "SystemAdministrationScriptsupdate_diem_version".to_string(), Box::new(decode_update_diem_version_script_function), ); map.insert( "TreasuryComplianceScriptsupdate_dual_attestation_limit".to_string(), Box::new(decode_update_dual_attestation_limit_script_function), ); map.insert( "TreasuryComplianceScriptsupdate_exchange_rate".to_string(), Box::new(decode_update_exchange_rate_script_function), ); map.insert( "TreasuryComplianceScriptsupdate_minting_ability".to_string(), Box::new(decode_update_minting_ability_script_function), ); map.insert( "ValidatorScriptsval_add_self".to_string(), Box::new(decode_val_add_self_script_function), ); map }); fn decode_bool_argument(arg: TransactionArgument) -> Option<bool> { match arg { TransactionArgument::Bool(value) => Some(value), _ => None, } } fn decode_u8_argument(arg: TransactionArgument) -> Option<u8> { match arg { TransactionArgument::U8(value) => Some(value), _ => None, } } fn 
decode_u64_argument(arg: TransactionArgument) -> Option<u64> { match arg { TransactionArgument::U64(value) => Some(value), _ => None, } } fn decode_address_argument(arg: TransactionArgument) -> Option<AccountAddress> { match arg { TransactionArgument::Address(value) => Some(value), _ => None, } } fn decode_u8vector_argument(arg: TransactionArgument) -> Option<Vec<u8>> { match arg { TransactionArgument::U8Vector(value) => Some(value), _ => None, } } const ADD_CURRENCY_TO_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 2, 3, 2, 6, 4, 8, 2, 5, 10, 7, 7, 17, 25, 8, 42, 16, 0, 0, 0, 1, 0, 1, 1, 1, 0, 2, 1, 6, 12, 0, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 97, 100, 100, 95, 99, 117, 114, 114, 101, 110, 99, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 3, 11, 0, 56, 0, 2, ]; const ADD_RECOVERY_ROTATION_CAPABILITY_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 2, 4, 4, 3, 8, 10, 5, 18, 15, 7, 33, 106, 8, 139, 1, 16, 0, 0, 0, 1, 0, 2, 1, 0, 0, 3, 0, 1, 0, 1, 4, 2, 3, 0, 1, 6, 12, 1, 8, 0, 2, 8, 0, 5, 0, 2, 6, 12, 5, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 15, 82, 101, 99, 111, 118, 101, 114, 121, 65, 100, 100, 114, 101, 115, 115, 21, 75, 101, 121, 82, 111, 116, 97, 116, 105, 111, 110, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 101, 120, 116, 114, 97, 99, 116, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 23, 97, 100, 100, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 3, 5, 11, 0, 17, 0, 10, 1, 17, 1, 2, ]; const ADD_VALIDATOR_AND_RECONFIGURE_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 6, 3, 6, 15, 5, 21, 24, 7, 45, 91, 8, 136, 1, 16, 0, 0, 0, 1, 0, 2, 1, 3, 0, 1, 0, 2, 4, 2, 3, 0, 0, 5, 4, 1, 0, 2, 6, 12, 3, 0, 1, 5, 1, 10, 2, 2, 6, 12, 5, 4, 6, 12, 3, 10, 2, 5, 2, 1, 3, 10, 68, 105, 101, 109, 83, 121, 
115, 116, 101, 109, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 14, 103, 101, 116, 95, 104, 117, 109, 97, 110, 95, 110, 97, 109, 101, 13, 97, 100, 100, 95, 118, 97, 108, 105, 100, 97, 116, 111, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 6, 18, 10, 0, 10, 1, 17, 0, 10, 3, 17, 1, 11, 2, 33, 12, 4, 11, 4, 3, 14, 11, 0, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 39, 11, 0, 10, 3, 17, 2, 2, ]; const BURN_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 3, 4, 11, 4, 15, 2, 5, 17, 17, 7, 34, 45, 8, 79, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 1, 1, 1, 4, 2, 6, 12, 3, 0, 2, 6, 12, 5, 3, 6, 12, 3, 5, 1, 9, 0, 4, 68, 105, 101, 109, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 4, 98, 117, 114, 110, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 1, 7, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 56, 0, 2, ]; const BURN_TXN_FEES_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 2, 3, 2, 6, 4, 8, 2, 5, 10, 7, 7, 17, 25, 8, 42, 16, 0, 0, 0, 1, 0, 1, 1, 1, 0, 2, 1, 6, 12, 0, 1, 9, 0, 14, 84, 114, 97, 110, 115, 97, 99, 116, 105, 111, 110, 70, 101, 101, 9, 98, 117, 114, 110, 95, 102, 101, 101, 115, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 3, 11, 0, 56, 0, 2, ]; const CANCEL_BURN_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 2, 3, 2, 6, 4, 8, 2, 5, 10, 8, 7, 18, 24, 8, 42, 16, 0, 0, 0, 1, 0, 1, 1, 1, 0, 2, 2, 6, 12, 5, 0, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 11, 99, 97, 110, 99, 101, 108, 95, 98, 117, 114, 110, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 4, 11, 0, 10, 1, 56, 0, 2, ]; const CREATE_CHILD_VASP_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 8, 1, 0, 2, 2, 2, 4, 3, 
6, 22, 4, 28, 4, 5, 32, 35, 7, 67, 122, 8, 189, 1, 16, 6, 205, 1, 4, 0, 0, 0, 1, 1, 0, 0, 2, 0, 1, 1, 1, 0, 3, 2, 3, 0, 0, 4, 4, 1, 1, 1, 0, 5, 3, 1, 0, 0, 6, 2, 6, 4, 6, 12, 5, 10, 2, 1, 0, 1, 6, 12, 1, 8, 0, 5, 6, 8, 0, 5, 3, 10, 2, 10, 2, 5, 6, 12, 5, 10, 2, 1, 3, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 18, 87, 105, 116, 104, 100, 114, 97, 119, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 25, 99, 114, 101, 97, 116, 101, 95, 99, 104, 105, 108, 100, 95, 118, 97, 115, 112, 95, 97, 99, 99, 111, 117, 110, 116, 27, 101, 120, 116, 114, 97, 99, 116, 95, 119, 105, 116, 104, 100, 114, 97, 119, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 8, 112, 97, 121, 95, 102, 114, 111, 109, 27, 114, 101, 115, 116, 111, 114, 101, 95, 119, 105, 116, 104, 100, 114, 97, 119, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 10, 2, 1, 0, 1, 1, 5, 3, 25, 10, 0, 10, 1, 11, 2, 10, 3, 56, 0, 10, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 36, 3, 10, 5, 22, 11, 0, 17, 1, 12, 5, 14, 5, 10, 1, 10, 4, 7, 0, 7, 0, 56, 1, 11, 5, 17, 3, 5, 24, 11, 0, 1, 2, ]; const CREATE_DESIGNATED_DEALER_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 3, 4, 11, 4, 15, 2, 5, 17, 27, 7, 44, 72, 8, 116, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 1, 1, 1, 4, 2, 6, 12, 3, 0, 5, 6, 12, 5, 10, 2, 10, 2, 1, 6, 6, 12, 3, 5, 10, 2, 10, 2, 1, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 24, 99, 114, 101, 97, 116, 101, 95, 100, 101, 115, 105, 103, 110, 97, 116, 101, 100, 95, 100, 101, 97, 108, 101, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 1, 10, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 11, 3, 11, 4, 10, 5, 56, 0, 2, ]; const CREATE_PARENT_VASP_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 3, 4, 11, 4, 15, 2, 5, 17, 27, 7, 44, 74, 8, 118, 
16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 1, 1, 1, 4, 2, 6, 12, 3, 0, 5, 6, 12, 5, 10, 2, 10, 2, 1, 6, 6, 12, 3, 5, 10, 2, 10, 2, 1, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 26, 99, 114, 101, 97, 116, 101, 95, 112, 97, 114, 101, 110, 116, 95, 118, 97, 115, 112, 95, 97, 99, 99, 111, 117, 110, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 1, 10, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 11, 3, 11, 4, 10, 5, 56, 0, 2, ]; const CREATE_RECOVERY_ADDRESS_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 2, 4, 4, 3, 8, 10, 5, 18, 12, 7, 30, 90, 8, 120, 16, 0, 0, 0, 1, 0, 2, 1, 0, 0, 3, 0, 1, 0, 1, 4, 2, 3, 0, 1, 6, 12, 1, 8, 0, 2, 6, 12, 8, 0, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 15, 82, 101, 99, 111, 118, 101, 114, 121, 65, 100, 100, 114, 101, 115, 115, 21, 75, 101, 121, 82, 111, 116, 97, 116, 105, 111, 110, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 101, 120, 116, 114, 97, 99, 116, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 7, 112, 117, 98, 108, 105, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 5, 10, 0, 11, 0, 17, 0, 17, 1, 2, ]; const CREATE_VALIDATOR_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 22, 7, 36, 72, 8, 108, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 0, 2, 6, 12, 3, 0, 4, 6, 12, 5, 10, 2, 10, 2, 5, 6, 12, 3, 5, 10, 2, 10, 2, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 24, 99, 114, 101, 97, 116, 101, 95, 118, 97, 108, 105, 100, 97, 116, 111, 114, 95, 97, 99, 99, 111, 117, 110, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
3, 1, 9, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 11, 3, 11, 4, 17, 1, 2, ]; const CREATE_VALIDATOR_OPERATOR_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 22, 7, 36, 81, 8, 117, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 0, 2, 6, 12, 3, 0, 4, 6, 12, 5, 10, 2, 10, 2, 5, 6, 12, 3, 5, 10, 2, 10, 2, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 33, 99, 114, 101, 97, 116, 101, 95, 118, 97, 108, 105, 100, 97, 116, 111, 114, 95, 111, 112, 101, 114, 97, 116, 111, 114, 95, 97, 99, 99, 111, 117, 110, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 9, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 11, 3, 11, 4, 17, 1, 2, ]; const FREEZE_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 14, 7, 28, 66, 8, 94, 16, 0, 0, 0, 1, 0, 2, 0, 1, 0, 1, 3, 2, 1, 0, 2, 6, 12, 5, 0, 2, 6, 12, 3, 3, 6, 12, 3, 5, 15, 65, 99, 99, 111, 117, 110, 116, 70, 114, 101, 101, 122, 105, 110, 103, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 14, 102, 114, 101, 101, 122, 101, 95, 97, 99, 99, 111, 117, 110, 116, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 7, 10, 0, 10, 1, 17, 1, 11, 0, 10, 2, 17, 0, 2, ]; const PEER_TO_PEER_WITH_METADATA_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 7, 1, 0, 2, 2, 2, 4, 3, 6, 16, 4, 22, 2, 5, 24, 29, 7, 53, 96, 8, 149, 1, 16, 0, 0, 0, 1, 1, 0, 0, 2, 0, 1, 0, 0, 3, 2, 3, 1, 1, 0, 4, 1, 3, 0, 1, 5, 1, 6, 12, 1, 8, 0, 5, 6, 8, 0, 5, 3, 10, 2, 10, 2, 0, 5, 6, 12, 5, 3, 10, 2, 10, 2, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 18, 87, 105, 116, 104, 100, 114, 97, 119, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 27, 101, 120, 116, 114, 97, 99, 116, 95, 119, 105, 116, 104, 100, 114, 97, 119, 
95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 8, 112, 97, 121, 95, 102, 114, 111, 109, 27, 114, 101, 115, 116, 111, 114, 101, 95, 119, 105, 116, 104, 100, 114, 97, 119, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 4, 1, 12, 11, 0, 17, 0, 12, 5, 14, 5, 10, 1, 10, 2, 11, 3, 11, 4, 56, 0, 11, 5, 17, 2, 2, ]; const PREBURN_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 7, 1, 0, 2, 2, 2, 4, 3, 6, 16, 4, 22, 2, 5, 24, 21, 7, 45, 95, 8, 140, 1, 16, 0, 0, 0, 1, 1, 0, 0, 2, 0, 1, 0, 0, 3, 2, 3, 1, 1, 0, 4, 1, 3, 0, 1, 5, 1, 6, 12, 1, 8, 0, 3, 6, 12, 6, 8, 0, 3, 0, 2, 6, 12, 3, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 18, 87, 105, 116, 104, 100, 114, 97, 119, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 27, 101, 120, 116, 114, 97, 99, 116, 95, 119, 105, 116, 104, 100, 114, 97, 119, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 7, 112, 114, 101, 98, 117, 114, 110, 27, 114, 101, 115, 116, 111, 114, 101, 95, 119, 105, 116, 104, 100, 114, 97, 119, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 4, 1, 10, 10, 0, 17, 0, 12, 2, 11, 0, 14, 2, 10, 1, 56, 0, 11, 2, 17, 2, 2, ]; const PUBLISH_SHARED_ED25519_PUBLIC_KEY_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 2, 3, 2, 5, 5, 7, 6, 7, 13, 31, 8, 44, 16, 0, 0, 0, 1, 0, 1, 0, 2, 6, 12, 10, 2, 0, 22, 83, 104, 97, 114, 101, 100, 69, 100, 50, 53, 53, 49, 57, 80, 117, 98, 108, 105, 99, 75, 101, 121, 7, 112, 117, 98, 108, 105, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 4, 11, 0, 11, 1, 17, 0, 2, ]; const REGISTER_VALIDATOR_CONFIG_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 2, 3, 2, 5, 5, 7, 11, 7, 18, 27, 8, 45, 16, 0, 0, 0, 1, 0, 1, 0, 5, 6, 12, 5, 10, 2, 10, 2, 10, 2, 0, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 10, 115, 101, 116, 95, 99, 111, 110, 102, 105, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
1, 7, 11, 0, 10, 1, 11, 2, 11, 3, 11, 4, 17, 0, 2, ]; const REMOVE_VALIDATOR_AND_RECONFIGURE_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 6, 3, 6, 15, 5, 21, 24, 7, 45, 94, 8, 139, 1, 16, 0, 0, 0, 1, 0, 2, 1, 3, 0, 1, 0, 2, 4, 2, 3, 0, 0, 5, 4, 1, 0, 2, 6, 12, 3, 0, 1, 5, 1, 10, 2, 2, 6, 12, 5, 4, 6, 12, 3, 10, 2, 5, 2, 1, 3, 10, 68, 105, 101, 109, 83, 121, 115, 116, 101, 109, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 14, 103, 101, 116, 95, 104, 117, 109, 97, 110, 95, 110, 97, 109, 101, 16, 114, 101, 109, 111, 118, 101, 95, 118, 97, 108, 105, 100, 97, 116, 111, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 6, 18, 10, 0, 10, 1, 17, 0, 10, 3, 17, 1, 11, 2, 33, 12, 4, 11, 4, 3, 14, 11, 0, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 39, 11, 0, 10, 3, 17, 2, 2, ]; const ROTATE_AUTHENTICATION_KEY_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 2, 2, 2, 4, 3, 6, 15, 5, 21, 18, 7, 39, 124, 8, 163, 1, 16, 0, 0, 0, 1, 1, 0, 0, 2, 0, 1, 0, 0, 3, 1, 2, 0, 0, 4, 3, 2, 0, 1, 6, 12, 1, 8, 0, 0, 2, 6, 8, 0, 10, 2, 2, 6, 12, 10, 2, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 21, 75, 101, 121, 82, 111, 116, 97, 116, 105, 111, 110, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 101, 120, 116, 114, 97, 99, 116, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 114, 101, 115, 116, 111, 114, 101, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 25, 114, 111, 116, 97, 116, 101, 95, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 105, 111, 110, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 1, 9, 11, 0, 17, 0, 12, 2, 14, 2, 11, 1, 17, 2, 11, 2, 17, 1, 2, ]; const ROTATE_AUTHENTICATION_KEY_WITH_NONCE_CODE: 
&[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 2, 4, 4, 3, 8, 20, 5, 28, 23, 7, 51, 159, 1, 8, 210, 1, 16, 0, 0, 0, 1, 0, 3, 1, 0, 1, 2, 0, 1, 0, 0, 4, 2, 3, 0, 0, 5, 3, 1, 0, 0, 6, 4, 1, 0, 2, 6, 12, 3, 0, 1, 6, 12, 1, 8, 0, 2, 6, 8, 0, 10, 2, 3, 6, 12, 3, 10, 2, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 21, 75, 101, 121, 82, 111, 116, 97, 116, 105, 111, 110, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 101, 120, 116, 114, 97, 99, 116, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 114, 101, 115, 116, 111, 114, 101, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 25, 114, 111, 116, 97, 116, 101, 95, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 105, 111, 110, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 3, 12, 10, 0, 10, 1, 17, 0, 11, 0, 17, 1, 12, 3, 14, 3, 11, 2, 17, 3, 11, 3, 17, 2, 2, ]; const ROTATE_AUTHENTICATION_KEY_WITH_NONCE_ADMIN_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 2, 4, 4, 3, 8, 20, 5, 28, 25, 7, 53, 159, 1, 8, 212, 1, 16, 0, 0, 0, 1, 0, 3, 1, 0, 1, 2, 0, 1, 0, 0, 4, 2, 3, 0, 0, 5, 3, 1, 0, 0, 6, 4, 1, 0, 2, 6, 12, 3, 0, 1, 6, 12, 1, 8, 0, 2, 6, 8, 0, 10, 2, 4, 6, 12, 6, 12, 3, 10, 2, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 21, 75, 101, 121, 82, 111, 116, 97, 116, 105, 111, 110, 67, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 101, 120, 116, 114, 97, 99, 116, 95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 31, 114, 101, 115, 116, 111, 114, 101, 
95, 107, 101, 121, 95, 114, 111, 116, 97, 116, 105, 111, 110, 95, 99, 97, 112, 97, 98, 105, 108, 105, 116, 121, 25, 114, 111, 116, 97, 116, 101, 95, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 105, 111, 110, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 3, 12, 11, 0, 10, 2, 17, 0, 11, 1, 17, 1, 12, 4, 14, 4, 11, 3, 17, 3, 11, 4, 17, 2, 2, ]; const ROTATE_AUTHENTICATION_KEY_WITH_RECOVERY_ADDRESS_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 2, 3, 2, 5, 5, 7, 8, 7, 15, 42, 8, 57, 16, 0, 0, 0, 1, 0, 1, 0, 4, 6, 12, 5, 5, 10, 2, 0, 15, 82, 101, 99, 111, 118, 101, 114, 121, 65, 100, 100, 114, 101, 115, 115, 25, 114, 111, 116, 97, 116, 101, 95, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 105, 111, 110, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 6, 11, 0, 10, 1, 10, 2, 11, 3, 17, 0, 2, ]; const ROTATE_DUAL_ATTESTATION_INFO_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 2, 3, 2, 10, 5, 12, 13, 7, 25, 61, 8, 86, 16, 0, 0, 0, 1, 0, 1, 0, 0, 2, 0, 1, 0, 2, 6, 12, 10, 2, 0, 3, 6, 12, 10, 2, 10, 2, 15, 68, 117, 97, 108, 65, 116, 116, 101, 115, 116, 97, 116, 105, 111, 110, 15, 114, 111, 116, 97, 116, 101, 95, 98, 97, 115, 101, 95, 117, 114, 108, 28, 114, 111, 116, 97, 116, 101, 95, 99, 111, 109, 112, 108, 105, 97, 110, 99, 101, 95, 112, 117, 98, 108, 105, 99, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 7, 10, 0, 11, 1, 17, 0, 11, 0, 11, 2, 17, 1, 2, ]; const ROTATE_SHARED_ED25519_PUBLIC_KEY_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 2, 3, 2, 5, 5, 7, 6, 7, 13, 34, 8, 47, 16, 0, 0, 0, 1, 0, 1, 0, 2, 6, 12, 10, 2, 0, 22, 83, 104, 97, 114, 101, 100, 69, 100, 50, 53, 53, 49, 57, 80, 117, 98, 108, 105, 99, 75, 101, 121, 10, 114, 111, 116, 97, 116, 101, 95, 107, 101, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 4, 11, 0, 11, 1, 17, 0, 2, ]; const SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 
5, 1, 0, 4, 3, 4, 10, 5, 14, 15, 7, 29, 68, 8, 97, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 0, 5, 6, 12, 5, 10, 2, 10, 2, 10, 2, 0, 2, 6, 12, 5, 10, 68, 105, 101, 109, 83, 121, 115, 116, 101, 109, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 10, 115, 101, 116, 95, 99, 111, 110, 102, 105, 103, 29, 117, 112, 100, 97, 116, 101, 95, 99, 111, 110, 102, 105, 103, 95, 97, 110, 100, 95, 114, 101, 99, 111, 110, 102, 105, 103, 117, 114, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 10, 10, 0, 10, 1, 11, 2, 11, 3, 11, 4, 17, 0, 11, 0, 10, 1, 17, 1, 2, ]; const SET_VALIDATOR_OPERATOR_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 19, 7, 33, 68, 8, 101, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 3, 0, 1, 5, 1, 10, 2, 2, 6, 12, 5, 0, 3, 6, 12, 10, 2, 5, 2, 1, 3, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 23, 86, 97, 108, 105, 100, 97, 116, 111, 114, 79, 112, 101, 114, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 14, 103, 101, 116, 95, 104, 117, 109, 97, 110, 95, 110, 97, 109, 101, 12, 115, 101, 116, 95, 111, 112, 101, 114, 97, 116, 111, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 5, 15, 10, 2, 17, 0, 11, 1, 33, 12, 3, 11, 3, 3, 11, 11, 0, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 39, 11, 0, 10, 2, 17, 1, 2, ]; const SET_VALIDATOR_OPERATOR_WITH_NONCE_ADMIN_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 6, 3, 6, 15, 5, 21, 26, 7, 47, 103, 8, 150, 1, 16, 0, 0, 0, 1, 0, 2, 0, 3, 0, 1, 0, 2, 4, 2, 3, 0, 1, 5, 4, 1, 0, 2, 6, 12, 3, 0, 1, 5, 1, 10, 2, 2, 6, 12, 5, 5, 6, 12, 6, 12, 3, 10, 2, 5, 2, 1, 3, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 15, 86, 97, 108, 105, 100, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 23, 86, 97, 108, 105, 100, 97, 116, 111, 114, 79, 112, 101, 114, 97, 116, 111, 114, 67, 111, 110, 102, 105, 103, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 14, 103, 101, 116, 
95, 104, 117, 109, 97, 110, 95, 110, 97, 109, 101, 12, 115, 101, 116, 95, 111, 112, 101, 114, 97, 116, 111, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 6, 18, 11, 0, 10, 2, 17, 0, 10, 4, 17, 1, 11, 3, 33, 12, 5, 11, 5, 3, 14, 11, 1, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 39, 11, 1, 10, 4, 17, 2, 2, ]; const TIERED_MINT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 4, 3, 4, 11, 4, 15, 2, 5, 17, 21, 7, 38, 59, 8, 97, 16, 0, 0, 0, 1, 1, 2, 0, 1, 0, 0, 3, 2, 1, 1, 1, 1, 4, 2, 6, 12, 3, 0, 4, 6, 12, 5, 3, 3, 5, 6, 12, 3, 5, 3, 3, 1, 9, 0, 11, 68, 105, 101, 109, 65, 99, 99, 111, 117, 110, 116, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 11, 116, 105, 101, 114, 101, 100, 95, 109, 105, 110, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 1, 9, 10, 0, 10, 1, 17, 0, 11, 0, 10, 2, 10, 3, 10, 4, 56, 0, 2, ]; const UNFREEZE_ACCOUNT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 14, 7, 28, 68, 8, 96, 16, 0, 0, 0, 1, 0, 2, 0, 1, 0, 1, 3, 2, 1, 0, 2, 6, 12, 5, 0, 2, 6, 12, 3, 3, 6, 12, 3, 5, 15, 65, 99, 99, 111, 117, 110, 116, 70, 114, 101, 101, 122, 105, 110, 103, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 16, 117, 110, 102, 114, 101, 101, 122, 101, 95, 97, 99, 99, 111, 117, 110, 116, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1, 7, 10, 0, 10, 1, 17, 1, 11, 0, 10, 2, 17, 0, 2, ]; const UPDATE_DIEM_VERSION_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 10, 7, 24, 51, 8, 75, 16, 0, 0, 0, 1, 0, 2, 0, 1, 0, 1, 3, 0, 1, 0, 2, 6, 12, 3, 0, 3, 6, 12, 3, 3, 11, 68, 105, 101, 109, 86, 101, 114, 115, 105, 111, 110, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 3, 115, 101, 116, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 
111, 114, 95, 97, 98, 111, 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 7, 10, 0, 10, 1, 17, 1, 11, 0, 10, 2, 17, 0, 2, ]; const UPDATE_DUAL_ATTESTATION_LIMIT_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 5, 1, 0, 4, 3, 4, 10, 5, 14, 10, 7, 24, 71, 8, 95, 16, 0, 0, 0, 1, 0, 2, 0, 1, 0, 1, 3, 0, 1, 0, 2, 6, 12, 3, 0, 3, 6, 12, 3, 3, 15, 68, 117, 97, 108, 65, 116, 116, 101, 115, 116, 97, 116, 105, 111, 110, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 19, 115, 101, 116, 95, 109, 105, 99, 114, 111, 100, 105, 101, 109, 95, 108, 105, 109, 105, 116, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 7, 10, 0, 10, 1, 17, 1, 11, 0, 10, 2, 17, 0, 2, ]; const UPDATE_EXCHANGE_RATE_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 7, 1, 0, 6, 2, 6, 4, 3, 10, 16, 4, 26, 2, 5, 28, 25, 7, 53, 99, 8, 152, 1, 16, 0, 0, 0, 1, 0, 2, 1, 1, 2, 0, 1, 3, 0, 1, 0, 2, 4, 2, 3, 0, 0, 5, 4, 3, 1, 1, 2, 6, 2, 3, 3, 1, 8, 0, 2, 6, 12, 3, 0, 2, 6, 12, 8, 0, 4, 6, 12, 3, 3, 3, 1, 9, 0, 4, 68, 105, 101, 109, 12, 70, 105, 120, 101, 100, 80, 111, 105, 110, 116, 51, 50, 12, 83, 108, 105, 100, 105, 110, 103, 78, 111, 110, 99, 101, 20, 99, 114, 101, 97, 116, 101, 95, 102, 114, 111, 109, 95, 114, 97, 116, 105, 111, 110, 97, 108, 21, 114, 101, 99, 111, 114, 100, 95, 110, 111, 110, 99, 101, 95, 111, 114, 95, 97, 98, 111, 114, 116, 24, 117, 112, 100, 97, 116, 101, 95, 120, 100, 120, 95, 101, 120, 99, 104, 97, 110, 103, 101, 95, 114, 97, 116, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 5, 1, 11, 10, 0, 10, 1, 17, 1, 10, 2, 10, 3, 17, 0, 12, 4, 11, 0, 11, 4, 56, 0, 2, ]; const UPDATE_MINTING_ABILITY_CODE: &[u8] = &[ 161, 28, 235, 11, 1, 0, 0, 0, 6, 1, 0, 2, 3, 2, 6, 4, 8, 2, 5, 10, 8, 7, 18, 28, 8, 46, 16, 0, 0, 0, 1, 0, 1, 1, 1, 0, 2, 2, 6, 12, 1, 0, 1, 9, 0, 4, 68, 105, 101, 109, 22, 117, 112, 100, 97, 116, 101, 95, 109, 105, 110, 116, 105, 110, 
103, 95, 97, 98, 105, 108, 105, 116, 121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 4, 11, 0, 10, 1, 56, 0, 2, ];
70.289668
391
0.530188
39b41269463e9eef695855e73718a4db6edbb662
117,074
use self::Suffix::*; use self::TargetFeature::*; use std::env; use std::fs::File; use std::io::prelude::*; use std::io::{self, BufReader}; use std::path::PathBuf; const IN: &str = "neon.spec"; const ARM_OUT: &str = "generated.rs"; const AARCH64_OUT: &str = "generated.rs"; const UINT_TYPES: [&str; 6] = [ "uint8x8_t", "uint8x16_t", "uint16x4_t", "uint16x8_t", "uint32x2_t", "uint32x4_t", ]; const UINT_TYPES_64: [&str; 2] = ["uint64x1_t", "uint64x2_t"]; const INT_TYPES: [&str; 6] = [ "int8x8_t", "int8x16_t", "int16x4_t", "int16x8_t", "int32x2_t", "int32x4_t", ]; const INT_TYPES_64: [&str; 2] = ["int64x1_t", "int64x2_t"]; const FLOAT_TYPES: [&str; 2] = [ //"float8x8_t", not supported by rust //"float8x16_t", not supported by rust //"float16x4_t", not supported by rust //"float16x8_t", not supported by rust "float32x2_t", "float32x4_t", ]; const FLOAT_TYPES_64: [&str; 2] = [ //"float8x8_t", not supported by rust //"float8x16_t", not supported by rust //"float16x4_t", not supported by rust //"float16x8_t", not supported by rust "float64x1_t", "float64x2_t", ]; fn type_len(t: &str) -> usize { let s: Vec<_> = t.split("x").collect(); if s.len() == 2 { match &s[1][0..2] { "1_" => 1, "2_" => 2, "4_" => 4, "8_" => 8, "16" => 16, _ => panic!("unknown type: {}", t), } } else if s.len() == 3 { s[1].parse::<usize>().unwrap() * type_sub_len(t) } else { 1 } } fn type_sub_len(t: &str) -> usize { let s: Vec<_> = t.split('x').collect(); if s.len() != 3 { 1 } else { match s[2] { "2_t" => 2, "3_t" => 3, "4_t" => 4, _ => panic!("unknown type len: {}", t), } } } fn type_bits(t: &str) -> usize { match t { "int8x8_t" | "int8x16_t" | "uint8x8_t" | "uint8x16_t" | "poly8x8_t" | "poly8x16_t" | "i8" | "u8" => 8, "int16x4_t" | "int16x8_t" | "uint16x4_t" | "uint16x8_t" | "poly16x4_t" | "poly16x8_t" | "i16" | "u16" => 16, "int32x2_t" | "int32x4_t" | "uint32x2_t" | "uint32x4_t" | "i32" | "u32" | "float32x2_t" | "float32x4_t" | "f32" => 32, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | 
"poly64x1_t" | "poly64x2_t" | "i64" | "u64" | "float64x1_t" | "float64x2_t" | "f64" => 64, _ => panic!("unknown type: {}", t), } } fn type_exp_len(t: &str, base_len: usize) -> usize { let t = type_to_sub_type(t); let len = type_len(&t) / base_len; match len { 1 => 0, 2 => 1, 4 => 2, 8 => 3, 16 => 4, _ => panic!("unknown type: {}", t), } } fn type_bits_exp_len(t: &str) -> usize { match t { "int8x8_t" | "int8x16_t" | "uint8x8_t" | "uint8x16_t" | "poly8x8_t" | "poly8x16_t" | "i8" | "u8" => 3, "int16x4_t" | "int16x8_t" | "uint16x4_t" | "uint16x8_t" | "poly16x4_t" | "poly16x8_t" | "i16" | "u16" => 4, "int32x2_t" | "int32x4_t" | "uint32x2_t" | "uint32x4_t" | "i32" | "u32" => 5, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | "poly64x1_t" | "poly64x2_t" | "i64" | "u64" => 6, _ => panic!("unknown type: {}", t), } } fn type_to_suffix(t: &str) -> &str { match t { "int8x8_t" => "_s8", "int8x16_t" => "q_s8", "int16x4_t" => "_s16", "int16x8_t" => "q_s16", "int32x2_t" => "_s32", "int32x4_t" => "q_s32", "int64x1_t" => "_s64", "int64x2_t" => "q_s64", "uint8x8_t" => "_u8", "uint8x16_t" => "q_u8", "uint16x4_t" => "_u16", "uint16x8_t" => "q_u16", "uint32x2_t" => "_u32", "uint32x4_t" => "q_u32", "uint64x1_t" => "_u64", "uint64x2_t" => "q_u64", "float16x4_t" => "_f16", "float16x8_t" => "q_f16", "float32x2_t" => "_f32", "float32x4_t" => "q_f32", "float64x1_t" => "_f64", "float64x2_t" => "q_f64", "poly8x8_t" => "_p8", "poly8x16_t" => "q_p8", "poly16x4_t" => "_p16", "poly16x8_t" => "q_p16", "poly64x1_t" => "_p64", "poly64x2_t" => "q_p64", "int8x8x2_t" => "_s8_x2", "int8x8x3_t" => "_s8_x3", "int8x8x4_t" => "_s8_x4", "int16x4x2_t" => "_s16_x2", "int16x4x3_t" => "_s16_x3", "int16x4x4_t" => "_s16_x4", "int32x2x2_t" => "_s32_x2", "int32x2x3_t" => "_s32_x3", "int32x2x4_t" => "_s32_x4", "int64x1x2_t" => "_s64_x2", "int64x1x3_t" => "_s64_x3", "int64x1x4_t" => "_s64_x4", "uint8x8x2_t" => "_u8_x2", "uint8x8x3_t" => "_u8_x3", "uint8x8x4_t" => "_u8_x4", "uint16x4x2_t" => "_u16_x2", 
"uint16x4x3_t" => "_u16_x3", "uint16x4x4_t" => "_u16_x4", "uint32x2x2_t" => "_u32_x2", "uint32x2x3_t" => "_u32_x3", "uint32x2x4_t" => "_u32_x4", "uint64x1x2_t" => "_u64_x2", "uint64x1x3_t" => "_u64_x3", "uint64x1x4_t" => "_u64_x4", "poly8x8x2_t" => "_p8_x2", "poly8x8x3_t" => "_p8_x3", "poly8x8x4_t" => "_p8_x4", "poly16x4x2_t" => "_p16_x2", "poly16x4x3_t" => "_p16_x3", "poly16x4x4_t" => "_p16_x4", "poly64x1x2_t" => "_p64_x2", "poly64x1x3_t" => "_p64_x3", "poly64x1x4_t" => "_p64_x4", "float32x2x2_t" => "_f32_x2", "float32x2x3_t" => "_f32_x3", "float32x2x4_t" => "_f32_x4", "float64x1x2_t" => "_f64_x2", "float64x1x3_t" => "_f64_x3", "float64x1x4_t" => "_f64_x4", "int8x16x2_t" => "q_s8_x2", "int8x16x3_t" => "q_s8_x3", "int8x16x4_t" => "q_s8_x4", "int16x8x2_t" => "q_s16_x2", "int16x8x3_t" => "q_s16_x3", "int16x8x4_t" => "q_s16_x4", "int32x4x2_t" => "q_s32_x2", "int32x4x3_t" => "q_s32_x3", "int32x4x4_t" => "q_s32_x4", "int64x2x2_t" => "q_s64_x2", "int64x2x3_t" => "q_s64_x3", "int64x2x4_t" => "q_s64_x4", "uint8x16x2_t" => "q_u8_x2", "uint8x16x3_t" => "q_u8_x3", "uint8x16x4_t" => "q_u8_x4", "uint16x8x2_t" => "q_u16_x2", "uint16x8x3_t" => "q_u16_x3", "uint16x8x4_t" => "q_u16_x4", "uint32x4x2_t" => "q_u32_x2", "uint32x4x3_t" => "q_u32_x3", "uint32x4x4_t" => "q_u32_x4", "uint64x2x2_t" => "q_u64_x2", "uint64x2x3_t" => "q_u64_x3", "uint64x2x4_t" => "q_u64_x4", "poly8x16x2_t" => "q_p8_x2", "poly8x16x3_t" => "q_p8_x3", "poly8x16x4_t" => "q_p8_x4", "poly16x8x2_t" => "q_p16_x2", "poly16x8x3_t" => "q_p16_x3", "poly16x8x4_t" => "q_p16_x4", "poly64x2x2_t" => "q_p64_x2", "poly64x2x3_t" => "q_p64_x3", "poly64x2x4_t" => "q_p64_x4", "float32x4x2_t" => "q_f32_x2", "float32x4x3_t" => "q_f32_x3", "float32x4x4_t" => "q_f32_x4", "float64x2x2_t" => "q_f64_x2", "float64x2x3_t" => "q_f64_x3", "float64x2x4_t" => "q_f64_x4", "i8" => "b_s8", "i16" => "h_s16", "i32" => "s_s32", "i64" => "d_s64", "u8" => "b_u8", "u16" => "h_u16", "u32" => "s_u32", "u64" => "d_u64", "f32" => "s_f32", "f64" => "d_f64", 
"p8" => "b_p8", "p16" => "h_p16", "p128" => "q_p128", _ => panic!("unknown type: {}", t), } } fn type_to_dup_suffix(t: &str) -> String { let s: Vec<_> = type_to_suffix(t).split('_').collect(); assert_eq!(s.len(), 2); format!("{}_dup_{}", s[0], s[1]) } fn type_to_lane_suffix(t: &str) -> String { let s: Vec<_> = type_to_suffix(t).split('_').collect(); assert_eq!(s.len(), 2); format!("{}_lane_{}", s[0], s[1]) } fn type_to_n_suffix(t: &str) -> &str { match t { "int8x8_t" => "_n_s8", "int8x16_t" => "q_n_s8", "int16x4_t" => "_n_s16", "int16x8_t" => "q_n_s16", "int32x2_t" => "_n_s32", "int32x4_t" => "q_n_s32", "int64x1_t" => "_n_s64", "int64x2_t" => "q_n_s64", "uint8x8_t" => "_n_u8", "uint8x16_t" => "q_n_u8", "uint16x4_t" => "_n_u16", "uint16x8_t" => "q_n_u16", "uint32x2_t" => "_n_u32", "uint32x4_t" => "q_n_u32", "uint64x1_t" => "_n_u64", "uint64x2_t" => "q_n_u64", "float16x4_t" => "_n_f16", "float16x8_t" => "q_n_f16", "float32x2_t" => "_n_f32", "float32x4_t" => "q_n_f32", "float64x1_t" => "_n_f64", "float64x2_t" => "q_n_f64", "poly8x8_t" => "_n_p8", "poly8x16_t" => "q_n_p8", "poly16x4_t" => "_n_p16", "poly16x8_t" => "q_n_p16", "poly64x1_t" => "_n_p64", "poly64x2_t" => "q_n_p64", "i8" => "b_n_s8", "i16" => "h_n_s16", "i32" => "s_n_s32", "i64" => "d_n_s64", "u8" => "b_n_u8", "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", _ => panic!("unknown type: {}", t), } } fn type_to_noq_n_suffix(t: &str) -> &str { match t { "int8x8_t" | "int8x16_t" => "_n_s8", "int16x4_t" | "int16x8_t" => "_n_s16", "int32x2_t" | "int32x4_t" => "_n_s32", "int64x1_t" | "int64x2_t" => "_n_s64", "uint8x8_t" | "uint8x16_t" => "_n_u8", "uint16x4_t" | "uint16x8_t" => "_n_u16", "uint32x2_t" | "uint32x4_t" => "_n_u32", "uint64x1_t" | "uint64x2_t" => "_n_u64", "float16x4_t" | "float16x8_t" => "_n_f16", "float32x2_t" | "float32x4_t" => "_n_f32", "float64x1_t" | "float64x2_t" => "_n_f64", "poly8x8_t" | "poly8x16_t" => "_n_p8", "poly16x4_t" | "poly16x8_t" => "_n_p16", "poly64x1_t" | "poly64x2_t" => 
"_n_p64", "i8" => "b_n_s8", "i16" => "h_n_s16", "i32" => "s_n_s32", "i64" => "d_n_s64", "u8" => "b_n_u8", "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", _ => panic!("unknown type: {}", t), } } fn type_to_lane_suffixes<'a>(out_t: &'a str, in_t: &'a str, re_to_out: bool) -> String { let mut str = String::new(); let suf = type_to_suffix(out_t); if !suf.starts_with("_") { str.push_str(&suf[0..1]); } str.push_str("_lane"); if !re_to_out { str.push_str(type_to_suffix(in_t)); } else { if type_to_suffix(in_t).starts_with("q") { str.push_str("q"); }; let suf2 = type_to_noq_suffix(out_t); str.push_str(suf2); } str } fn type_to_rot_suffix(c_name: &str, suf: &str) -> String { let ns: Vec<_> = c_name.split('_').collect(); assert_eq!(ns.len(), 2); if suf.starts_with("q") { format!("{}q_{}{}", ns[0], ns[1], &suf[1..]) } else { format!("{}{}", c_name, suf) } } fn type_to_signed(t: &str) -> String { let s = t.replace("uint", "int"); let s = s.replace("poly", "int"); s } fn type_to_unsigned(t: &str) -> String { if t.contains("uint") { return t.to_string(); } let s = t.replace("int", "uint"); let s = s.replace("poly", "uint"); s } fn type_to_double_suffixes<'a>(out_t: &'a str, in_t: &'a str) -> String { let mut str = String::new(); let suf = type_to_suffix(in_t); if suf.starts_with("q") && type_to_suffix(out_t).starts_with("q") { str.push_str("q"); } if !suf.starts_with("_") && !suf.starts_with("q") { str.push_str(&suf[0..1]); } str.push_str(type_to_noq_suffix(out_t)); str.push_str(type_to_noq_suffix(in_t)); str } fn type_to_double_n_suffixes<'a>(out_t: &'a str, in_t: &'a str) -> String { let mut str = String::new(); let suf = type_to_suffix(in_t); if suf.starts_with("q") && type_to_suffix(out_t).starts_with("q") { str.push_str("q"); } if !suf.starts_with("_") && !suf.starts_with("q") { str.push_str(&suf[0..1]); } str.push_str("_n"); str.push_str(type_to_noq_suffix(out_t)); str.push_str(type_to_noq_suffix(in_t)); str } fn type_to_noq_double_suffixes<'a>(out_t: &'a str, 
in_t: &'a str) -> String { let mut str = String::new(); str.push_str(type_to_noq_suffix(out_t)); str.push_str(type_to_noq_suffix(in_t)); str } fn type_to_noq_suffix(t: &str) -> &str { match t { "int8x8_t" | "int8x16_t" | "i8" => "_s8", "int16x4_t" | "int16x8_t" | "i16" => "_s16", "int32x2_t" | "int32x4_t" | "i32" => "_s32", "int64x1_t" | "int64x2_t" | "i64" => "_s64", "uint8x8_t" | "uint8x16_t" | "u8" => "_u8", "uint16x4_t" | "uint16x8_t" | "u16" => "_u16", "uint32x2_t" | "uint32x4_t" | "u32" => "_u32", "uint64x1_t" | "uint64x2_t" | "u64" => "_u64", "float16x4_t" | "float16x8_t" => "_f16", "float32x2_t" | "float32x4_t" | "f32" => "_f32", "float64x1_t" | "float64x2_t" | "f64" => "_f64", "poly8x8_t" | "poly8x16_t" => "_p8", "poly16x4_t" | "poly16x8_t" => "_p16", "poly64x1_t" | "poly64x2_t" | "p64" => "_p64", "p128" => "_p128", _ => panic!("unknown type: {}", t), } } #[derive(Clone, Copy)] enum Suffix { Normal, Double, NoQ, NoQDouble, NSuffix, DoubleN, NoQNSuffix, OutSuffix, OutNSuffix, OutNox, In1Nox, OutDupNox, OutLaneNox, In1LaneNox, Lane, In2, In2Lane, OutLane, Rot, RotLane, } #[derive(Clone, Copy)] enum TargetFeature { Default, ArmV7, Vfp4, FPArmV8, AES, FCMA, Dotprod, I8MM, SHA3, RDM, SM4, FTTS, } #[derive(Clone, Copy)] enum Fntype { Normal, Load, Store, } fn type_to_global_type(t: &str) -> &str { match t { "int8x8_t" | "int8x8x2_t" | "int8x8x3_t" | "int8x8x4_t" => "i8x8", "int8x16_t" | "int8x16x2_t" | "int8x16x3_t" | "int8x16x4_t" => "i8x16", "int16x4_t" | "int16x4x2_t" | "int16x4x3_t" | "int16x4x4_t" => "i16x4", "int16x8_t" | "int16x8x2_t" | "int16x8x3_t" | "int16x8x4_t" => "i16x8", "int32x2_t" | "int32x2x2_t" | "int32x2x3_t" | "int32x2x4_t" => "i32x2", "int32x4_t" | "int32x4x2_t" | "int32x4x3_t" | "int32x4x4_t" => "i32x4", "int64x1_t" | "int64x1x2_t" | "int64x1x3_t" | "int64x1x4_t" => "i64x1", "int64x2_t" | "int64x2x2_t" | "int64x2x3_t" | "int64x2x4_t" => "i64x2", "uint8x8_t" | "uint8x8x2_t" | "uint8x8x3_t" | "uint8x8x4_t" => "u8x8", "uint8x16_t" | 
"uint8x16x2_t" | "uint8x16x3_t" | "uint8x16x4_t" => "u8x16", "uint16x4_t" | "uint16x4x2_t" | "uint16x4x3_t" | "uint16x4x4_t" => "u16x4", "uint16x8_t" | "uint16x8x2_t" | "uint16x8x3_t" | "uint16x8x4_t" => "u16x8", "uint32x2_t" | "uint32x2x2_t" | "uint32x2x3_t" | "uint32x2x4_t" => "u32x2", "uint32x4_t" | "uint32x4x2_t" | "uint32x4x3_t" | "uint32x4x4_t" => "u32x4", "uint64x1_t" | "uint64x1x2_t" | "uint64x1x3_t" | "uint64x1x4_t" => "u64x1", "uint64x2_t" | "uint64x2x2_t" | "uint64x2x3_t" | "uint64x2x4_t" => "u64x2", "float16x4_t" => "f16x4", "float16x8_t" => "f16x8", "float32x2_t" | "float32x2x2_t" | "float32x2x3_t" | "float32x2x4_t" => "f32x2", "float32x4_t" | "float32x4x2_t" | "float32x4x3_t" | "float32x4x4_t" => "f32x4", "float64x1_t" | "float64x1x2_t" | "float64x1x3_t" | "float64x1x4_t" => "f64", "float64x2_t" | "float64x2x2_t" | "float64x2x3_t" | "float64x2x4_t" => "f64x2", "poly8x8_t" | "poly8x8x2_t" | "poly8x8x3_t" | "poly8x8x4_t" => "i8x8", "poly8x16_t" | "poly8x16x2_t" | "poly8x16x3_t" | "poly8x16x4_t" => "i8x16", "poly16x4_t" | "poly16x4x2_t" | "poly16x4x3_t" | "poly16x4x4_t" => "i16x4", "poly16x8_t" | "poly16x8x2_t" | "poly16x8x3_t" | "poly16x8x4_t" => "i16x8", "poly64x1_t" | "poly64x1x2_t" | "poly64x1x3_t" | "poly64x1x4_t" => "i64x1", "poly64x2_t" | "poly64x2x2_t" | "poly64x2x3_t" | "poly64x2x4_t" => "i64x2", "i8" => "i8", "i16" => "i16", "i32" => "i32", "i64" => "i64", "u8" => "u8", "u16" => "u16", "u32" => "u32", "u64" => "u64", "f32" => "f32", "f64" => "f64", "p8" => "p8", "p16" => "p16", "p64" => "p64", "p128" => "p128", _ => panic!("unknown type: {}", t), } } fn type_to_sub_type(t: &str) -> String { let s: Vec<_> = t.split('x').collect(); match s.len() { 2 => String::from(t), 3 => format!("{}x{}_t", s[0], s[1]), _ => panic!("unknown type: {}", t), } } fn type_to_native_type(t: &str) -> String { let s: Vec<_> = t.split('x').collect(); match s.len() { 1 => { assert!(t.contains("*const") || t.contains("*mut")); let sub: Vec<_> = t.split(' ').collect(); 
String::from(sub[1]) } 2 | 3 => match &s[0][0..3] { "int" => format!("i{}", &s[0][3..]), "uin" => format!("u{}", &s[0][4..]), "flo" => format!("f{}", &s[0][5..]), "pol" => format!("u{}", &s[0][4..]), _ => panic!("unknown type: {}", t), }, _ => panic!("unknown type: {}", t), } } fn native_type_to_type(t: &str) -> &str { match t { "i8" => "int8x8_t", "i16" => "int16x4_t", "i32" => "int32x2_t", "i64" => "int64x1_t", "u8" => "uint8x8_t", "u16" => "uint16x4_t", "u32" => "uint32x2_t", "u64" => "uint64x1_t", "f16" => "float16x4_t", "f32" => "float32x2_t", "f64" => "float64x1_t", _ => panic!("unknown type: {}", t), } } fn native_type_to_long_type(t: &str) -> &str { match t { "i8" => "int8x16_t", "i16" => "int16x8_t", "i32" => "int32x4_t", "i64" => "int64x2_t", "u8" => "uint8x16_t", "u16" => "uint16x8_t", "u32" => "uint32x4_t", "u64" => "uint64x2_t", "f16" => "float16x8_t", "f32" => "float32x4_t", "f64" => "float64x2_t", _ => panic!("unknown type: {}", t), } } fn type_to_half(t: &str) -> &str { match t { "int8x16_t" => "int8x8_t", "int16x8_t" => "int16x4_t", "int32x4_t" => "int32x2_t", "int64x2_t" => "int64x1_t", "uint8x16_t" => "uint8x8_t", "uint16x8_t" => "uint16x4_t", "uint32x4_t" => "uint32x2_t", "uint64x2_t" => "uint64x1_t", "poly8x16_t" => "poly8x8_t", "poly16x8_t" => "poly16x4_t", "float32x4_t" => "float32x2_t", "float64x2_t" => "float64x1_t", _ => panic!("unknown half type for {}", t), } } fn asc(start: i32, len: usize) -> String { let mut s = String::from("["); for i in 0..len { if i != 0 { s.push_str(", "); } let n = start + i as i32; s.push_str(&n.to_string()); } s.push_str("]"); s } fn transpose1(x: usize) -> &'static str { match x { 2 => "[0, 2]", 4 => "[0, 4, 2, 6]", 8 => "[0, 8, 2, 10, 4, 12, 6, 14]", 16 => "[0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]", _ => panic!("unknown transpose order of len {}", x), } } fn transpose2(x: usize) -> &'static str { match x { 2 => "[1, 3]", 4 => "[1, 5, 3, 7]", 8 => "[1, 9, 3, 11, 5, 13, 7, 15]", 16 => "[1, 
17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]", _ => panic!("unknown transpose order of len {}", x), } } fn zip1(x: usize) -> &'static str { match x { 2 => "[0, 2]", 4 => "[0, 4, 1, 5]", 8 => "[0, 8, 1, 9, 2, 10, 3, 11]", 16 => "[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]", _ => panic!("unknown zip order of len {}", x), } } fn zip2(x: usize) -> &'static str { match x { 2 => "[1, 3]", 4 => "[2, 6, 3, 7]", 8 => "[4, 12, 5, 13, 6, 14, 7, 15]", 16 => "[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]", _ => panic!("unknown zip order of len {}", x), } } fn unzip1(x: usize) -> &'static str { match x { 2 => "[0, 2]", 4 => "[0, 2, 4, 6]", 8 => "[0, 2, 4, 6, 8, 10, 12, 14]", 16 => "[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]", _ => panic!("unknown unzip order of len {}", x), } } fn unzip2(x: usize) -> &'static str { match x { 2 => "[1, 3]", 4 => "[1, 3, 5, 7]", 8 => "[1, 3, 5, 7, 9, 11, 13, 15]", 16 => "[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]", _ => panic!("unknown unzip order of len {}", x), } } fn values(t: &str, vs: &[String]) -> String { if vs.len() == 1 && !t.contains('x') { format!(": {} = {}", t, vs[0]) } else if vs.len() == 1 && type_to_global_type(t) == "f64" { format!(": {} = {}", type_to_global_type(t), vs[0]) } else { let s: Vec<_> = t.split('x').collect(); if s.len() == 3 { format!( ": [{}; {}] = [{}]", type_to_native_type(t), type_len(t), vs.iter() .map(|v| map_val(type_to_global_type(t), v)) //.map(|v| format!("{}{}", v, type_to_native_type(t))) .collect::<Vec<_>>() .join(", ") ) } else { format!( ": {} = {}::new({})", type_to_global_type(t), type_to_global_type(t), vs.iter() .map(|v| map_val(type_to_global_type(t), v)) //.map(|v| format!("{}{}", v, type_to_native_type(t))) .collect::<Vec<_>>() .join(", ") ) } } } fn max_val(t: &str) -> &'static str { match &t[..3] { "u8x" => "0xFF", "u16" => "0xFF_FF", "u32" => "0xFF_FF_FF_FF", "u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", "i8x" => "0x7F", "i16" 
=> "0x7F_FF", "i32" => "0x7F_FF_FF_FF", "i64" => "0x7F_FF_FF_FF_FF_FF_FF_FF", "f32" => "3.40282347e+38", "f64" => "1.7976931348623157e+308", _ => panic!("No TRUE for type {}", t), } } fn min_val(t: &str) -> &'static str { match &t[..3] { "u8x" => "0", "u16" => "0", "u32" => "0", "u64" => "0", "i8x" => "-128", "i16" => "-32768", "i32" => "-2147483648", "i64" => "-9223372036854775808", "f32" => "-3.40282347e+38", "f64" => "-1.7976931348623157e+308", _ => panic!("No TRUE for type {}", t), } } fn true_val(t: &str) -> &'static str { match &t[..3] { "u8x" => "0xFF", "u16" => "0xFF_FF", "u32" => "0xFF_FF_FF_FF", "u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", _ => panic!("No TRUE for type {}", t), } } fn ff_val(t: &str) -> &'static str { match &t[..3] { "u8x" => "0xFF", "u16" => "0xFF_FF", "u32" => "0xFF_FF_FF_FF", "u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", "i8x" => "0xFF", "i16" => "0xFF_FF", "i32" => "0xFF_FF_FF_FF", "i64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", _ => panic!("No TRUE for type {}", t), } } fn false_val(_t: &str) -> &'static str { "0" } fn bits(t: &str) -> &'static str { match &t[..3] { "u8x" => "8", "u16" => "16", "u32" => "32", "u64" => "64", "i8x" => "8", "i16" => "16", "i32" => "32", "i64" => "64", "p8x" => "8", "p16" => "16", "p64" => "64", _ => panic!("Unknown bits for type {}", t), } } fn bits_minus_one(t: &str) -> &'static str { match &t[..3] { "u8x" => "7", "u16" => "15", "u32" => "31", "u64" => "63", "i8x" => "7", "i16" => "15", "i32" => "31", "i64" => "63", "p8x" => "7", "p16" => "15", "p64" => "63", _ => panic!("Unknown bits for type {}", t), } } fn half_bits(t: &str) -> &'static str { match &t[..3] { "u8x" => "4", "u16" => "8", "u32" => "16", "u64" => "32", "i8x" => "4", "i16" => "8", "i32" => "16", "i64" => "32", "p8x" => "4", "p16" => "8", "p64" => "32", _ => panic!("Unknown bits for type {}", t), } } fn type_len_str(t: &str) -> &'static str { match t { "int8x8_t" => "8", "int8x16_t" => "16", "int16x4_t" => "4", "int16x8_t" => "8", "int32x2_t" => "2", "int32x4_t" 
=> "4", "int64x1_t" => "1", "int64x2_t" => "2", "uint8x8_t" => "8", "uint8x16_t" => "16", "uint16x4_t" => "4", "uint16x8_t" => "8", "uint32x2_t" => "2", "uint32x4_t" => "4", "uint64x1_t" => "1", "uint64x2_t" => "2", "float16x4_t" => "4", "float16x8_t" => "8", "float32x2_t" => "2", "float32x4_t" => "4", "float64x1_t" => "1", "float64x2_t" => "2", "poly8x8_t" => "8", "poly8x16_t" => "16", "poly16x4_t" => "4", "poly16x8_t" => "8", "poly64x1_t" => "1", "poly64x2_t" => "2", _ => panic!("unknown type: {}", t), } } fn type_half_len_str(t: &str) -> &'static str { match t { "int8x8_t" => "4", "int8x16_t" => "8", "int16x4_t" => "2", "int16x8_t" => "4", "int32x2_t" => "1", "int32x4_t" => "2", "int64x1_t" => "0", "int64x2_t" => "1", "uint8x8_t" => "4", "uint8x16_t" => "8", "uint16x4_t" => "2", "uint16x8_t" => "4", "uint32x2_t" => "1", "uint32x4_t" => "2", "uint64x1_t" => "0", "uint64x2_t" => "1", "float16x4_t" => "2", "float16x8_t" => "4", "float32x2_t" => "1", "float32x4_t" => "2", "float64x1_t" => "0", "float64x2_t" => "1", "poly8x8_t" => "4", "poly8x16_t" => "8", "poly16x4_t" => "2", "poly16x8_t" => "4", "poly64x1_t" => "0", "poly64x2_t" => "1", _ => panic!("unknown type: {}", t), } } fn map_val<'v>(t: &str, v: &'v str) -> &'v str { match v { "FALSE" => false_val(t), "TRUE" => true_val(t), "MAX" => max_val(t), "MIN" => min_val(t), "FF" => ff_val(t), "BITS" => bits(t), "BITS_M1" => bits_minus_one(t), "HFBITS" => half_bits(t), "LEN" => type_len_str(t), "HFLEN" => type_half_len_str(t), o => o, } } fn type_to_ext(t: &str, v: bool, r: bool, pi8: bool) -> String { if !t.contains('x') { return t.replace("u", "i"); } let native = type_to_native_type(t); let sub_ext = match type_sub_len(t) { 1 => String::new(), _ if v => format!( ".p0v{}{}", &type_len(&type_to_sub_type(t)).to_string(), native ), _ if pi8 => format!(".p0i8"), _ => format!(".p0{}", native), }; let sub_type = match &native[0..1] { "i" | "f" => native, "u" => native.replace("u", "i"), _ => panic!("unknown type: {}", t), 
}; let ext = format!( "v{}{}{}", &type_len(&type_to_sub_type(t)).to_string(), sub_type, sub_ext ); if r { let ss: Vec<_> = ext.split('.').collect(); if ss.len() != 2 { ext } else { format!("{}.{}", ss[1], ss[0]) } } else { ext } } fn ext(s: &str, in_t: &[&str; 3], out_t: &str) -> String { s.replace("_EXT_", &type_to_ext(in_t[0], false, false, false)) .replace("_EXT2_", &type_to_ext(out_t, false, false, false)) .replace("_EXT3_", &type_to_ext(in_t[1], false, false, false)) .replace("_EXT4_", &type_to_ext(in_t[2], false, false, false)) .replace("_EXTr3_", &type_to_ext(in_t[1], false, true, false)) .replace("_EXTv2_", &type_to_ext(out_t, true, false, false)) .replace("_EXTpi8_", &type_to_ext(in_t[1], false, false, true)) .replace("_EXTpi82_", &type_to_ext(out_t, false, false, true)) .replace("_EXTpi8r_", &type_to_ext(in_t[1], false, true, true)) } fn is_vldx(name: &str) -> bool { let s: Vec<_> = name.split('_').collect(); &name[0..3] == "vld" && name[3..4].parse::<i32>().unwrap() > 1 && (s.last().unwrap().starts_with("s") || s.last().unwrap().starts_with("f")) } fn is_vstx(name: &str) -> bool { let s: Vec<_> = name.split('_').collect(); s.len() == 2 && &name[0..3] == "vst" && name[3..4].parse::<i32>().unwrap() > 1 && (s[1].starts_with("s") || s[1].starts_with("f")) } #[allow(clippy::too_many_arguments)] fn gen_aarch64( current_comment: &str, current_fn: &Option<String>, current_name: &str, current_aarch64: &Option<String>, link_aarch64: &Option<String>, const_aarch64: &Option<String>, constn: &Option<String>, in_t: &[&str; 3], out_t: &str, current_tests: &[( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )], suffix: Suffix, para_num: i32, target: TargetFeature, fixed: &Vec<String>, multi_fn: &Vec<String>, fn_type: Fntype, ) -> (String, String) { let name = match suffix { Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, 
type_to_double_suffixes(out_t, in_t[1]) ), NoQDouble => format!( "{}{}", current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ), NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, type_to_suffix(&type_to_sub_type(out_t)) ), In1Nox => format!( "{}{}", current_name, type_to_suffix(&type_to_sub_type(in_t[1])) ), OutDupNox => format!( "{}{}", current_name, type_to_dup_suffix(&type_to_sub_type(out_t)) ), OutLaneNox => format!( "{}{}", current_name, type_to_lane_suffix(&type_to_sub_type(out_t)) ), In1LaneNox => format!( "{}{}", current_name, type_to_lane_suffix(&type_to_sub_type(in_t[1])) ), Lane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[2], false) ), OutLane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[2], true) ), Rot => type_to_rot_suffix(current_name, type_to_suffix(out_t)), RotLane => type_to_rot_suffix(current_name, &type_to_lane_suffixes(out_t, in_t[2], false)), }; let current_target = match target { Default => "neon", ArmV7 => "neon", Vfp4 => "neon", FPArmV8 => "neon", AES => "neon,aes", FCMA => "neon,fcma", Dotprod => "neon,dotprod", I8MM => "neon,i8mm", SHA3 => "neon,sha3", RDM => "rdm", SM4 => "neon,sm4", FTTS => "neon,frintts", }; let current_fn = if let Some(current_fn) = current_fn.clone() { if link_aarch64.is_some() { panic!("[{}] Can't specify link and fn at the same time.", name) } current_fn } else if link_aarch64.is_some() { format!("{}_", name) } else { if multi_fn.is_empty() { panic!( "[{}] 
Either (multi) fn or link-aarch have to be specified.", name ) } String::new() }; let current_aarch64 = current_aarch64.clone().unwrap(); let mut link_t: Vec<String> = vec![ in_t[0].to_string(), in_t[1].to_string(), in_t[2].to_string(), out_t.to_string(), ]; let mut ext_c = String::new(); if let Some(mut link_aarch64) = link_aarch64.clone() { if link_aarch64.contains(":") { let links: Vec<_> = link_aarch64.split(':').map(|v| v.to_string()).collect(); assert_eq!(links.len(), 5); link_aarch64 = links[0].to_string(); link_t = vec![ links[1].clone(), links[2].clone(), links[3].clone(), links[4].clone(), ]; } let link_aarch64 = if link_aarch64.starts_with("llvm") { ext(&link_aarch64, in_t, out_t) } else { let mut link = String::from("llvm.aarch64.neon."); link.push_str(&link_aarch64); ext(&link, in_t, out_t) }; let (ext_inputs, ext_output) = { if const_aarch64.is_some() { if !matches!(fn_type, Fntype::Normal) { let ptr_type = match fn_type { Fntype::Load => "*const i8", Fntype::Store => "*mut i8", _ => panic!("unsupported fn type"), }; let sub = type_to_sub_type(in_t[1]); ( match type_sub_len(in_t[1]) { 1 => format!("a: {}, n: i64, ptr: {}", sub, ptr_type), 2 => format!("a: {}, b: {}, n: i64, ptr: {}", sub, sub, ptr_type), 3 => format!( "a: {}, b: {}, c: {}, n: i64, ptr: {}", sub, sub, sub, ptr_type ), 4 => format!( "a: {}, b: {}, c: {}, d: {}, n: i64, ptr: {}", sub, sub, sub, sub, ptr_type ), _ => panic!("unsupported type: {}", in_t[1]), }, if out_t != "void" { format!(" -> {}", out_t) } else { String::new() }, ) } else { ( match para_num { 1 => format!("a: {}, n: i32", in_t[0]), 2 => format!("a: {}, b: {}, n: i32", in_t[0], in_t[1]), 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", out_t), ) } } else if matches!(fn_type, Fntype::Store) { let sub = type_to_sub_type(in_t[1]); let ptr_type = if is_vstx(&name) { "i8".to_string() } else { type_to_native_type(in_t[1]) }; let subs = match 
type_sub_len(in_t[1]) { 1 => format!("a: {}", sub), 2 => format!("a: {}, b: {}", sub, sub), 3 => format!("a: {}, b: {}, c: {}", sub, sub, sub), 4 => format!("a: {}, b: {}, c: {}, d: {}", sub, sub, sub, sub), _ => panic!("unsupported type: {}", in_t[1]), }; (format!("{}, ptr: *mut {}", subs, ptr_type), String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { type_to_native_type(out_t) } else { type_to_sub_type(out_t) }; ( format!("ptr: *const {}", ptr_type), format!(" -> {}", out_t), ) } else { ( match para_num { 1 => format!("a: {}", link_t[0]), 2 => format!("a: {}, b: {}", link_t[0], link_t[1]), 3 => format!("a: {}, b: {}, c: {}", link_t[0], link_t[1], link_t[2]), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", link_t[3]), ) } }; ext_c = format!( r#"#[allow(improper_ctypes)] extern "unadjusted" {{ #[cfg_attr(target_arch = "aarch64", link_name = "{}")] fn {}({}){}; }} "#, link_aarch64, current_fn, ext_inputs, ext_output, ); }; let const_declare = if let Some(constn) = constn { if constn.contains(":") { let constns: Vec<_> = constn.split(':').map(|v| v.to_string()).collect(); assert_eq!(constns.len(), 2); format!(r#"<const {}: i32, const {}: i32>"#, constns[0], constns[1]) } else { format!(r#"<const {}: i32>"#, constn) } } else { String::new() }; let multi_calls = if !multi_fn.is_empty() { let mut calls = String::new(); for i in 0..multi_fn.len() { if i > 0 { calls.push_str("\n "); } calls.push_str(&get_call( &multi_fn[i], current_name, &const_declare, in_t, out_t, fixed, None, true, )); } calls } else { String::new() }; let const_assert = if let Some(constn) = constn { if constn.contains(":") { let constns: Vec<_> = constn.split(':').map(|v| v.to_string()).collect(); let const_test = current_tests[0].3.as_ref().unwrap(); let const_tests: Vec<_> = const_test.split(':').map(|v| v.to_string()).collect(); assert_eq!(constns.len(), 2); assert_eq!(const_tests.len(), 2); format!( r#", {} = {}, {} = {}"#, constns[0], map_val(in_t[1], 
&const_tests[0]), constns[1], map_val(in_t[1], &const_tests[1]), ) } else { format!( r#", {} = {}"#, constn, map_val(in_t[1], current_tests[0].3.as_ref().unwrap()) ) } } else { String::new() }; let const_legacy = if let Some(constn) = constn { if constn.contains(":") { format!( "\n#[rustc_legacy_const_generics({}, {})]", para_num - 1, para_num + 1 ) } else { format!("\n#[rustc_legacy_const_generics({})]", para_num) } } else { String::new() }; let fn_decl = { let fn_output = if out_t == "void" { String::new() } else { format!("-> {} ", out_t) }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), 2 => format!("(a: {}, b: {})", in_t[0], in_t[1]), 3 => format!("(a: {}, b: {}, c: {})", in_t[0], in_t[1], in_t[2]), _ => panic!("unsupported parameter number"), }; format!( "pub unsafe fn {}{}{} {}", name, const_declare, fn_inputs, fn_output ) }; let call_params = { if let (Some(const_aarch64), Some(_)) = (const_aarch64, link_aarch64) { if !matches!(fn_type, Fntype::Normal) { let subs = match type_sub_len(in_t[1]) { 1 => "b", 2 => "b.0, b.1", 3 => "b.0, b.1, b.2", 4 => "b.0, b.1, b.2, b.3", _ => panic!("unsupported type: {}", in_t[1]), }; format!( r#"{} {}{}({}, {} as i64, a as _)"#, multi_calls, ext_c, current_fn, subs, constn.as_deref().unwrap() ) } else { match para_num { 1 => format!( r#"{} {}{}(a, {})"#, multi_calls, ext_c, current_fn, const_aarch64 ), 2 => format!( r#"{} {}{}(a, b, {})"#, multi_calls, ext_c, current_fn, const_aarch64 ), _ => String::new(), } } } else if link_aarch64.is_some() && matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { 1 => format!(r#"{}{}(b, a{})"#, ext_c, current_fn, cast), 2 => format!(r#"{}{}(b.0, b.1, a{})"#, ext_c, current_fn, cast), 3 => format!(r#"{}{}(b.0, b.1, b.2, a{})"#, ext_c, current_fn, cast), 4 => format!(r#"{}{}(b.0, b.1, b.2, b.3, a{})"#, ext_c, current_fn, cast), _ => panic!("unsupported type: {}", in_t[1]), } } else if 
link_aarch64.is_some() && is_vldx(&name) { format!(r#"{}{}(a as _)"#, ext_c, current_fn,) } else { let trans: [&str; 2] = if link_t[3] != out_t { ["transmute(", ")"] } else { ["", ""] }; match (multi_calls.len(), para_num, fixed.len()) { (0, 1, 0) => format!(r#"{}{}{}(a){}"#, ext_c, trans[0], current_fn, trans[1]), (0, 1, _) => { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[0])).cloned().collect(); format!( r#"let b{}; {}{}{}(a, transmute(b)){}"#, values(in_t[0], &fixed), ext_c, trans[0], current_fn, trans[1], ) } (0, 2, _) => format!(r#"{}{}{}(a, b){}"#, ext_c, trans[0], current_fn, trans[1],), (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, _, _) => String::new(), } } }; let stable = match target { Default | ArmV7 | Vfp4 | FPArmV8 | AES => { String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]") } _ => String::new(), }; let function = format!( r#" {} #[inline] #[target_feature(enable = "{}")] #[cfg_attr(test, assert_instr({}{}))]{}{} {}{{ {} }} "#, current_comment, current_target, current_aarch64, const_assert, const_legacy, stable, fn_decl, call_params ); let test_target = match target { I8MM => "neon,i8mm", SM4 => "neon,sm4", SHA3 => "neon,sha3", FTTS => "neon,frintts", _ => "neon", }; let test = match fn_type { Fntype::Normal => gen_test( &name, in_t, &out_t, current_tests, [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])], type_len(out_t), para_num, test_target, ), Fntype::Load => gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t)), Fntype::Store => gen_store_test(&name, in_t, &out_t, current_tests, type_len(in_t[1])), }; (function, test) }
/// Generates the `#[simd_test]` unit test for a load (`vld*`-style) intrinsic.
///
/// `current_tests` holds `(a, b, c, n, e)` tuples: input values `a`, optional
/// second input `b`, unused `c`, optional const-generic value `n`, and the
/// expected output `e`. `type_len` is the element count of the output type.
/// NOTE(review): the generated test reads from `a[1..].as_ptr()` — presumably
/// to exercise an offset (unaligned) load; confirm against the intrinsics.
fn gen_load_test( name: &str, in_t: &[&str; 3], out_t: &str, current_tests: &[( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )], type_len: usize, ) -> String { let
mut test = format!( r#" #[simd_test(enable = "neon")] unsafe fn test_{}() {{"#, name, ); for (a, b, _, n, e) in current_tests { let a: Vec<String> = a.iter().take(type_len + 1).cloned().collect(); let e: Vec<String> = e.iter().take(type_len).cloned().collect(); let has_b = b.len() > 0; let has_n = n.is_some(); let mut input = String::from("["); for i in 0..type_len + 1 { if i != 0 { input.push_str(", "); } input.push_str(&a[i]) } input.push_str("]"); let output = |v: &Vec<String>| { let mut output = String::from("["); for i in 0..type_sub_len(out_t) { if i != 0 { output.push_str(", "); } let sub_len = type_len / type_sub_len(out_t); if type_to_global_type(out_t) != "f64" { let mut sub_output = format!("{}::new(", type_to_global_type(out_t)); for j in 0..sub_len { if j != 0 { sub_output.push_str(", "); } sub_output.push_str(&v[i * sub_len + j]); } sub_output.push_str(")"); output.push_str(&sub_output); } else { output.push_str(&v[i]); } } output.push_str("]"); output }; let input_b = if has_b { let b: Vec<String> = b.iter().take(type_len).cloned().collect(); format!( r#" let b: [{}; {}] = {};"#, type_to_global_type(in_t[1]), type_sub_len(in_t[1]), output(&b), ) } else { String::new() }; let t = format!( r#" let a: [{}; {}] = {};{} let e: [{}; {}] = {}; let r: [{}; {}] = transmute({}{}(a[1..].as_ptr(){})); assert_eq!(r, e); "#, type_to_native_type(out_t), type_len + 1, input, input_b, type_to_global_type(out_t), type_sub_len(out_t), output(&e), type_to_global_type(out_t), type_sub_len(out_t), name, if has_n { format!("::<{}>", n.as_deref().unwrap()) } else { String::new() }, if has_b { ", transmute(b)" } else { "" }, ); test.push_str(&t); } test.push_str(" }\n"); test }
/// Generates the `#[simd_test]` unit test for a store (`vst*`-style)
/// intrinsic: the test writes through the intrinsic into a zero-initialized
/// buffer `r` and compares it against the expected array `e`.
fn gen_store_test( name: &str, in_t: &[&str; 3], _out_t: &str, current_tests: &[( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )], type_len: usize, ) -> String { let mut test = format!( r#" #[simd_test(enable = "neon")] unsafe fn test_{}() {{"#, name, ); for (a, _, _, constn, e)
in current_tests { let a: Vec<String> = a.iter().take(type_len + 1).cloned().collect(); let e: Vec<String> = e.iter().take(type_len).cloned().collect(); let mut input = String::from("["); for i in 0..type_len + 1 { if i != 0 { input.push_str(", "); } input.push_str(&a[i]) } input.push_str("]"); let mut output = String::from("["); for i in 0..type_len { if i != 0 { output.push_str(", "); } output.push_str(&e[i]) } output.push_str("]"); let const_n = constn .as_deref() .map_or(String::new(), |n| format!("::<{}>", n.to_string())); let t = format!( r#" let a: [{}; {}] = {}; let e: [{}; {}] = {}; let mut r: [{}; {}] = [0{}; {}]; {}{}(r.as_mut_ptr(), core::ptr::read_unaligned(a[1..].as_ptr() as _)); assert_eq!(r, e); "#, type_to_native_type(in_t[1]), type_len + 1, input, type_to_native_type(in_t[1]), type_len, output, type_to_native_type(in_t[1]), type_len, type_to_native_type(in_t[1]), type_len, name, const_n, ); test.push_str(&t); } test.push_str(" }\n"); test }
/// Generates the generic `#[simd_test]` unit test for a "normal" intrinsic
/// with 1–3 vector arguments (`para_num`). An optional const-generic value
/// `n` may encode two const parameters as `"a:b"`; `len_in`/`len_out` give
/// the element counts used to truncate the test-vector literals.
fn gen_test( name: &str, in_t: &[&str; 3], out_t: &str, current_tests: &[( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )], len_in: [usize; 3], len_out: usize, para_num: i32, target: &str, ) -> String { let mut test = format!( r#" #[simd_test(enable = "{}")] unsafe fn test_{}() {{"#, target, name, ); for (a, b, c, n, e) in current_tests { let a: Vec<String> = a.iter().take(len_in[0]).cloned().collect(); let b: Vec<String> = b.iter().take(len_in[1]).cloned().collect(); let c: Vec<String> = c.iter().take(len_in[2]).cloned().collect(); let e: Vec<String> = e.iter().take(len_out).cloned().collect(); let const_value = if let Some(constn) = n { if constn.contains(":") { let constns: Vec<_> = constn.split(':').map(|v| v.to_string()).collect(); format!( r#"::<{}, {}>"#, map_val(in_t[1], &constns[0]), map_val(in_t[1], &constns[1]) ) } else { format!(r#"::<{}>"#, map_val(in_t[1], constn)) } } else { String::new() }; let r_type = match type_sub_len(out_t) { 1 =>
type_to_global_type(out_t).to_string(), _ => format!("[{}; {}]", type_to_native_type(out_t), type_len(out_t)), }; let t = { match para_num { 1 => { format!( r#" let a{}; let e{}; let r: {} = transmute({}{}(transmute(a))); assert_eq!(r, e); "#, values(in_t[0], &a), values(out_t, &e), r_type, name, const_value ) } 2 => { format!( r#" let a{}; let b{}; let e{}; let r: {} = transmute({}{}(transmute(a), transmute(b))); assert_eq!(r, e); "#, values(in_t[0], &a), values(in_t[1], &b), values(out_t, &e), r_type, name, const_value ) } 3 => { format!( r#" let a{}; let b{}; let c{}; let e{}; let r: {} = transmute({}{}(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); "#, values(in_t[0], &a), values(in_t[1], &b), values(in_t[2], &c), values(out_t, &e), r_type, name, const_value ) } _ => { panic!("no support para_num:{}", para_num.to_string()) } } }; test.push_str(&t); } test.push_str(" }\n"); test }
/// Generates the ARM/AArch64 (dual-target) implementation and test for one
/// intrinsic, returning `(function, test)` source strings. When `separate`
/// is true, distinct `#[cfg(target_arch)]`-gated bodies are emitted for
/// 32-bit ARM and AArch64; otherwise one shared body is produced.
#[allow(clippy::too_many_arguments)] fn gen_arm( current_comment: &str, current_fn: &Option<String>, current_name: &str, current_arm: &str, link_arm: &Option<String>, current_aarch64: &Option<String>, link_aarch64: &Option<String>, const_arm: &Option<String>, const_aarch64: &Option<String>, constn: &Option<String>, in_t: &[&str; 3], out_t: &str, current_tests: &[( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )], suffix: Suffix, para_num: i32, target: TargetFeature, fixed: &Vec<String>, multi_fn: &Vec<String>, fn_type: Fntype, separate: bool, ) -> (String, String) { let name = match suffix { Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, type_to_double_suffixes(out_t, in_t[1]) ), NoQDouble => format!( "{}{}", current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ),
NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, type_to_suffix(&type_to_sub_type(out_t)) ), In1Nox => format!( "{}{}", current_name, type_to_suffix(&type_to_sub_type(in_t[1])) ), OutDupNox => format!( "{}{}", current_name, type_to_dup_suffix(&type_to_sub_type(out_t)) ), OutLaneNox => format!( "{}{}", current_name, type_to_lane_suffix(&type_to_sub_type(out_t)) ), In1LaneNox => format!( "{}{}", current_name, type_to_lane_suffix(&type_to_sub_type(in_t[1])) ), Lane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[2], false) ), OutLane => format!( "{}{}", current_name, type_to_lane_suffixes(out_t, in_t[2], true) ), Rot => type_to_rot_suffix(current_name, type_to_suffix(out_t)), RotLane => type_to_rot_suffix(current_name, &type_to_lane_suffixes(out_t, in_t[2], false)), };
// The AArch64 mnemonic defaults to the ARM one when not explicitly given.
let current_aarch64 = current_aarch64 .clone() .unwrap_or_else(|| current_arm.to_string()); let current_target_aarch64 = match target { Default => "neon", ArmV7 => "neon", Vfp4 => "neon", FPArmV8 => "neon", AES => "neon,aes", FCMA => "neon,fcma", Dotprod => "neon,dotprod", I8MM => "neon,i8mm", SHA3 => "neon,sha3", RDM => "rdm", SM4 => "neon,sm4", FTTS => "neon,frintts", };
// 32-bit ARM feature strings; targets mapped to unreachable!() are never
// generated for the 32-bit ARM path.
let current_target_arm = match target { Default => "v7", ArmV7 => "v7", Vfp4 => "vfp4", FPArmV8 => "fp-armv8,v8", AES => "aes,v8", FCMA => "v8", // v8.3a
Dotprod => "v8", // v8.2a
I8MM => "v8,i8mm", RDM => unreachable!(), SM4 => unreachable!(), SHA3 => unreachable!(), FTTS => unreachable!(), };
// Inner callee name: an explicit `fn`, a generated "{name}_" link shim, or
// empty when the body comes solely from `multi_fn` expansion.
let current_fn = if let Some(current_fn) = current_fn.clone() { if link_aarch64.is_some() || link_arm.is_some() { panic!( "[{}] Can't specify link and function at the same
time. {} / {:?} / {:?}", name, current_fn, link_aarch64, link_arm ) } current_fn } else if link_aarch64.is_some() || link_arm.is_some() { format!("{}_", name) } else { if multi_fn.is_empty() { panic!( "[{}] Either fn or link-arm and link-aarch have to be specified.", name ) } String::new() }; let mut ext_c = String::new(); let mut ext_c_arm = if multi_fn.is_empty() || link_arm.is_none() { String::new() } else { String::from( r#" "#, ) }; let mut ext_c_aarch64 = if multi_fn.is_empty() || link_aarch64.is_none() { String::new() } else { String::from( r#" "#, ) }; let mut link_arm_t: Vec<String> = vec![ in_t[0].to_string(), in_t[1].to_string(), in_t[2].to_string(), out_t.to_string(), ]; let mut link_aarch64_t: Vec<String> = vec![ in_t[0].to_string(), in_t[1].to_string(), in_t[2].to_string(), out_t.to_string(), ];
// Emit the extern "unadjusted" declarations for both targets; a link value
// of the form "link:t0:t1:t2:out" overrides the default signature types.
if let (Some(mut link_arm), Some(mut link_aarch64)) = (link_arm.clone(), link_aarch64.clone()) { if link_arm.contains(":") { let links: Vec<_> = link_arm.split(':').map(|v| v.to_string()).collect(); assert_eq!(links.len(), 5); link_arm = links[0].to_string(); link_arm_t = vec![ links[1].clone(), links[2].clone(), links[3].clone(), links[4].clone(), ]; } if link_aarch64.contains(":") { let links: Vec<_> = link_aarch64.split(':').map(|v| v.to_string()).collect(); assert_eq!(links.len(), 5); link_aarch64 = links[0].to_string(); link_aarch64_t = vec![ links[1].clone(), links[2].clone(), links[3].clone(), links[4].clone(), ]; } let link_arm = if link_arm.starts_with("llvm") { ext(&link_arm, in_t, out_t) } else { let mut link = String::from("llvm.arm.neon."); link.push_str(&link_arm); ext(&link, in_t, out_t) }; let link_aarch64 = if link_aarch64.starts_with("llvm") { ext(&link_aarch64, in_t, out_t) } else { let mut link = String::from("llvm.aarch64.neon."); link.push_str(&link_aarch64); ext(&link, in_t, out_t) }; if out_t == link_arm_t[3] && out_t == link_aarch64_t[3] { ext_c = format!( r#"#[allow(improper_ctypes)] extern "unadjusted" {{ #[cfg_attr(target_arch =
"arm", link_name = "{}")] #[cfg_attr(target_arch = "aarch64", link_name = "{}")] fn {}({}) -> {}; }} "#, link_arm, link_aarch64, current_fn, match para_num { 1 => format!("a: {}", in_t[0]), 2 => format!("a: {}, b: {}", in_t[0], in_t[1]), 3 => format!("a: {}, b: {}, c: {}", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, out_t ); };
// Parameter list and return type of the ARM link function (for load/store
// fn types the pointer argument comes first, with an n/size trailer).
let (arm_ext_inputs, arm_ext_output) = { if let Some(const_arm) = const_arm { if !matches!(fn_type, Fntype::Normal) { let ptr_type = match fn_type { Fntype::Load => "*const i8", Fntype::Store => "*mut i8", _ => panic!("unsupported fn type"), }; let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { 1 => format!("a: {}", sub_type), 2 => format!("a: {}, b: {}", sub_type, sub_type,), 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; let out = if out_t == "void" { String::new() } else { format!(" -> {}", out_t) }; ( format!("ptr: {}, {}, n: i32, size: i32", ptr_type, inputs), out, ) } else { let (_, const_type) = if const_arm.contains(":") { let consts: Vec<_> = const_arm.split(':').map(|v| v.trim().to_string()).collect(); (consts[0].clone(), consts[1].clone()) } else { ( const_arm.to_string(), in_t[para_num as usize - 1].to_string(), ) }; ( match para_num { 1 => format!("a: {}, n: {}", in_t[0], const_type), 2 => format!("a: {}, b: {}, n: {}", in_t[0], in_t[1], const_type), 3 => format!( "a: {}, b: {}, c: {}, n: {}", in_t[0], in_t[1], in_t[2], const_type ), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", out_t), ) } } else if out_t != link_arm_t[3] { ( match para_num { 1 => format!("a: {}", link_arm_t[0]), 2 => format!("a: {}, b: {}", link_arm_t[0], link_arm_t[1]), 3 => format!( "a: {}, b: {}, c: {}", link_arm_t[0], link_arm_t[1], link_arm_t[2] ), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", link_arm_t[3]), ) }
else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { 1 => format!("a: {}", sub_type), 2 => format!("a: {}, b: {}", sub_type, sub_type,), 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; let (ptr_type, size) = if is_vstx(&name) { ("i8".to_string(), ", size: i32") } else { (type_to_native_type(in_t[1]), "") }; ( format!("ptr: *mut {}, {}{}", ptr_type, inputs, size), String::new(), ) } else if is_vldx(&name) { ( format!("ptr: *const i8, size: i32"), format!(" -> {}", out_t), ) } else { (String::new(), String::new()) } }; ext_c_arm.push_str(&format!( r#"#[allow(improper_ctypes)] extern "unadjusted" {{ #[cfg_attr(target_arch = "arm", link_name = "{}")] fn {}({}){}; }} "#, link_arm, current_fn, arm_ext_inputs, arm_ext_output, ));
// Same computation for the AArch64 link function; note the argument order
// differs from ARM (stores append the pointer last, const n is i64/i32).
let (aarch64_ext_inputs, aarch64_ext_output) = { if let Some(const_aarch64) = const_aarch64 { if !matches!(fn_type, Fntype::Normal) { let ptr_type = match fn_type { Fntype::Load => "*const i8", Fntype::Store => "*mut i8", _ => panic!("unsupported fn type"), }; let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { 1 => format!("a: {}", sub_type,), 2 => format!("a: {}, b: {}", sub_type, sub_type,), 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; inputs.push_str(&format!(", n: i64, ptr: {}", ptr_type)); let out = if out_t == "void" { String::new() } else { format!(" -> {}", out_t) }; (inputs, out) } else if const_aarch64.contains("dup-in_len-N as ttn") { ( match para_num { 1 => format!("a: {}, n: {}", in_t[0], in_t[0]), 2 => format!("a: {}, b: {}, n: {}", in_t[0], in_t[1], in_t[1]), 3 => format!( "a: {}, b: {}, c: {}, n: {}", in_t[0], in_t[1],
in_t[2], in_t[1] ), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", out_t), ) } else { ( match para_num { 1 => format!("a: {}, n: i32", in_t[0]), 2 => format!("a: {}, b: {}, n: i32", in_t[0], in_t[1]), 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", out_t), ) } } else if out_t != link_aarch64_t[3] { ( match para_num { 1 => format!("a: {}", link_aarch64_t[0]), 2 => format!("a: {}, b: {}", link_aarch64_t[0], link_aarch64_t[1]), 3 => format!( "a: {}, b: {}, c: {}", link_aarch64_t[0], link_aarch64_t[1], link_aarch64_t[2] ), _ => unimplemented!("unknown para_num"), }, format!(" -> {}", link_aarch64_t[3]), ) } else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { 1 => format!("a: {}", sub_type,), 2 => format!("a: {}, b: {}", sub_type, sub_type,), 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; let ptr_type = if is_vstx(&name) { "i8".to_string() } else { type_to_native_type(in_t[1]) }; inputs.push_str(&format!(", ptr: *mut {}", ptr_type)); (inputs, String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { type_to_native_type(out_t) } else { type_to_sub_type(out_t) }; ( format!("ptr: *const {}", ptr_type), format!(" -> {}", out_t), ) } else { (String::new(), String::new()) } }; ext_c_aarch64.push_str(&format!( r#"#[allow(improper_ctypes)] extern "unadjusted" {{ #[cfg_attr(target_arch = "aarch64", link_name = "{}")] fn {}({}){}; }} "#, link_aarch64, current_fn, aarch64_ext_inputs, aarch64_ext_output, )); };
// Const-generic parameter plumbing for the public wrapper.
let const_declare = if let Some(constn) = constn { format!(r#"<const {}: i32>"#, constn) } else { String::new() };
// Expand each multi_fn entry through get_call into the body statements.
let multi_calls = if !multi_fn.is_empty() { let mut calls = String::new(); for i in 0..multi_fn.len() { if
i > 0 { calls.push_str("\n "); } calls.push_str(&get_call( &multi_fn[i], current_name, &const_declare, in_t, out_t, fixed, None, false, )); } calls } else { String::new() }; let const_assert = if let Some(constn) = constn { format!( r#", {} = {}"#, constn, map_val(in_t[1], current_tests[0].3.as_ref().unwrap()) ) } else { String::new() }; let const_legacy = if constn.is_some() { format!("\n#[rustc_legacy_const_generics({})]", para_num) } else { String::new() };
// Public signature of the generated intrinsic wrapper.
let fn_decl = { let fn_output = if out_t == "void" { String::new() } else { format!("-> {} ", out_t) }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), 2 => format!("(a: {}, b: {})", in_t[0], in_t[1]), 3 => format!("(a: {}, b: {}, c: {})", in_t[0], in_t[1], in_t[2]), _ => panic!("unsupported parameter number"), }; format!( "pub unsafe fn {}{}{} {}", name, const_declare, fn_inputs, fn_output ) };
// `separate == true`: emit two #[cfg(target_arch)]-gated bodies (arm and
// aarch64); otherwise emit one shared body below.
let function = if separate { let call_arm = { let arm_params = if let (Some(const_arm), Some(_)) = (const_arm, link_arm) { if !matches!(fn_type, Fntype::Normal) { let subs = match type_sub_len(in_t[1]) { 1 => "b", 2 => "b.0, b.1", 3 => "b.0, b.1, b.2", 4 => "b.0, b.1, b.2, b.3", _ => "", }; format!( "{}(a as _, {}, {}, {})", current_fn, subs, constn.as_deref().unwrap(), type_bits(&type_to_sub_type(in_t[1])) / 8, ) } else { let cnt = if const_arm.contains(':') { let consts: Vec<_> = const_arm.split(':').map(|v| v.trim().to_string()).collect(); consts[0].clone() } else { let const_arm = const_arm.replace("ttn", &type_to_native_type(in_t[1])); let mut cnt = String::from(in_t[1]); cnt.push_str("("); for i in 0..type_len(in_t[1]) { if i != 0 { cnt.push_str(", "); } cnt.push_str(&const_arm); } cnt.push_str(")"); cnt }; match para_num { 1 => format!("{}(a, {})", current_fn, cnt), 2 => format!("{}(a, b, {})", current_fn, cnt), _ => String::new(), } } } else if out_t != link_arm_t[3] { match para_num { 1 => format!("transmute({}(a))", current_fn,), 2 => format!("transmute({}(transmute(a), transmute(b)))",
current_fn,), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { let (cast, size) = if is_vstx(&name) { ( " as _", format!(", {}", type_bits(&type_to_sub_type(in_t[1])) / 8), ) } else { ("", String::new()) }; match type_sub_len(in_t[1]) { 1 => format!("{}(a{}, b{})", current_fn, cast, size), 2 => format!("{}(a{}, b.0, b.1{})", current_fn, cast, size), 3 => format!("{}(a{}, b.0, b.1, b.2{})", current_fn, cast, size), 4 => format!("{}(a{}, b.0, b.1, b.2, b.3{})", current_fn, cast, size), _ => String::new(), } } else if link_arm.is_some() && is_vldx(&name) { format!( "{}(a as *const i8, {})", current_fn, type_bits(&type_to_sub_type(out_t)) / 8 ) } else { String::new() }; format!( r#"{}{{ {}{}{} }}"#, fn_decl, multi_calls, ext_c_arm, arm_params ) };
// AArch64 side of the separate-body case.
let call_aarch64 = { let aarch64_params = if let (Some(const_aarch64), Some(_)) = (const_aarch64, link_aarch64) { if !matches!(fn_type, Fntype::Normal) { let subs = match type_sub_len(in_t[1]) { 1 => "b", 2 => "b.0, b.1", 3 => "b.0, b.1, b.2", 4 => "b.0, b.1, b.2, b.3", _ => "", }; format!( "{}({}, {} as i64, a as _)", current_fn, subs, constn.as_deref().unwrap() ) } else if const_aarch64.contains("dup-in_len-N as ttn") { let const_aarch64 = format!("N as {}", type_to_native_type(in_t[1])); let mut cnt = String::from(in_t[1]); cnt.push_str("("); for i in 0..type_len(in_t[1]) { if i != 0 { cnt.push_str(", "); } cnt.push_str(&const_aarch64); } cnt.push_str(")"); format!("{}(a, {})", current_fn, cnt) } else { match para_num { 1 => format!("{}(a, {})", current_fn, const_aarch64), 2 => format!("{}(a, b, {})", current_fn, const_aarch64), _ => String::new(), } } } else if out_t != link_aarch64_t[3] { match para_num { 1 => format!("transmute({}(a))", current_fn,), 2 => format!("transmute({}(a, b))", current_fn,), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { 1 => format!("{}(b, a{})", current_fn, cast), 2 =>
format!("{}(b.0, b.1, a{})", current_fn, cast), 3 => format!("{}(b.0, b.1, b.2, a{})", current_fn, cast), 4 => format!("{}(b.0, b.1, b.2, b.3, a{})", current_fn, cast), _ => String::new(), } } else if link_aarch64.is_some() && is_vldx(&name) { format!("{}(a as _)", current_fn) } else { String::new() }; format!( r#"{}{{ {}{}{} }}"#, fn_decl, multi_calls, ext_c_aarch64, aarch64_params ) }; let stable_aarch64 = match target { Default | ArmV7 | Vfp4 | FPArmV8 | AES => { String::from("\n#[stable(feature = \"neon_intrinsics\", since = \"1.59.0\")]") } _ => String::new(), }; format!( r#" {} #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,{}")] #[cfg_attr(test, assert_instr({}{}))]{} {} {} #[inline] #[cfg(target_arch = "aarch64")] #[target_feature(enable = "{}")] #[cfg_attr(test, assert_instr({}{}))]{}{} {} "#, current_comment, current_target_arm, expand_intrinsic(&current_arm, in_t[1]), const_assert, const_legacy, call_arm, current_comment, current_target_aarch64, expand_intrinsic(&current_aarch64, in_t[1]), const_assert, const_legacy, stable_aarch64, call_aarch64, ) } else { let call = { let stmts = match (multi_calls.len(), para_num, fixed.len()) { (0, 1, 0) => format!(r#"{}{}(a)"#, ext_c, current_fn,), (0, 1, _) => { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[0])).cloned().collect(); format!( r#"let b{}; {}{}(a, transmute(b))"#, values(in_t[0], &fixed), ext_c, current_fn, ) } (0, 2, _) => format!(r#"{}{}(a, b)"#, ext_c, current_fn,), (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), (_, _, _) => String::new(), }; if stmts != String::new() { format!( r#"{}{{ {} }}"#, fn_decl, stmts ) } else { String::new() } }; let stable_aarch64 = match target { Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature =
\"neon_intrinsics\", since = \"1.59.0\"))]"), _ => String::new(), }; format!( r#" {} #[inline] #[target_feature(enable = "{}")] #[cfg_attr(target_arch = "arm", target_feature(enable = "{}"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr({}{}))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}{} {} "#, current_comment, current_target_aarch64, current_target_arm, expand_intrinsic(&current_arm, in_t[1]), const_assert, expand_intrinsic(&current_aarch64, in_t[1]), const_assert, const_legacy, stable_aarch64, call, ) }; let test_target = match target { I8MM => "neon,i8mm", SM4 => "neon,sm4", SHA3 => "neon,sha3", FTTS => "neon,frintts", _ => "neon", }; let test = match fn_type { Fntype::Normal => gen_test( &name, in_t, &out_t, current_tests, [type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])], type_len(out_t), para_num, test_target, ), Fntype::Load => gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t)), Fntype::Store => gen_store_test(&name, in_t, &out_t, current_tests, type_len(in_t[1])), }; (function, test) }
/// Expands a trailing type placeholder in an `assert_instr` mnemonic based
/// on element type `t`:
/// - a trailing `.`  appends the LLVM element type (e.g. "i8", "f32"),
/// - a trailing `.s` appends a signedness-aware suffix (e.g. "s8", "u16", "p8"),
/// - a trailing `.l` appends the element bit width (e.g. "8", "64"),
/// - anything else is returned unchanged.
/// Panics on a vector type missing from the relevant table.
fn expand_intrinsic(intr: &str, t: &str) -> String { if intr.ends_with('.') { let ext = match t { "int8x8_t" => "i8", "int8x16_t" => "i8", "int16x4_t" => "i16", "int16x8_t" => "i16", "int32x2_t" => "i32", "int32x4_t" => "i32", "int64x1_t" => "i64", "int64x2_t" => "i64", "uint8x8_t" => "i8", "uint8x16_t" => "i8", "uint16x4_t" => "i16", "uint16x8_t" => "i16", "uint32x2_t" => "i32", "uint32x4_t" => "i32", "uint64x1_t" => "i64", "uint64x2_t" => "i64", "float16x4_t" => "f16", "float16x8_t" => "f16", "float32x2_t" => "f32", "float32x4_t" => "f32", "float64x1_t" => "f64", "float64x2_t" => "f64", "poly8x8_t" => "i8", "poly8x16_t" => "i8", "poly16x4_t" => "i16", "poly16x8_t" => "i16", /* "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ _ => panic!("unknown type for extension: {}", t), }; format!(r#""{}{}""#, intr, ext) } else if intr.ends_with(".s") { let ext = match t { "int8x8_t" => "s8", "int8x16_t" => "s8",
"int16x4_t" => "s16", "int16x8_t" => "s16", "int32x2_t" => "s32", "int32x4_t" => "s32", "int64x1_t" => "s64", "int64x2_t" => "s64", "uint8x8_t" => "u8", "uint8x16_t" => "u8", "uint16x4_t" => "u16", "uint16x8_t" => "u16", "uint32x2_t" => "u32", "uint32x4_t" => "u32", "uint64x1_t" => "u64", "uint64x2_t" => "u64", "poly8x8_t" => "p8", "poly8x16_t" => "p8", "poly16x4_t" => "p16", "poly16x8_t" => "p16", "float16x4_t" => "f16", "float16x8_t" => "f16", "float32x2_t" => "f32", "float32x4_t" => "f32", "float64x1_t" => "f64", "float64x2_t" => "f64", /* "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ _ => panic!("unknown type for extension: {}", t), }; format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) } else if intr.ends_with(".l") { let ext = match t { "int8x8_t" => "8", "int8x16_t" => "8", "int16x4_t" => "16", "int16x8_t" => "16", "int32x2_t" => "32", "int32x4_t" => "32", "int64x1_t" => "64", "int64x2_t" => "64", "uint8x8_t" => "8", "uint8x16_t" => "8", "uint16x4_t" => "16", "uint16x8_t" => "16", "uint32x2_t" => "32", "uint32x4_t" => "32", "uint64x1_t" => "64", "uint64x2_t" => "64", "poly8x8_t" => "8", "poly8x16_t" => "8", "poly16x4_t" => "16", "poly16x8_t" => "16", "float16x4_t" => "16", "float16x8_t" => "16", "float32x2_t" => "32", "float32x4_t" => "32", "float64x1_t" => "64", "float64x2_t" => "64", "poly64x1_t" => "64", "poly64x2_t" => "64", _ => panic!("unknown type for extension: {}", t), }; format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) } else { intr.to_string() } }
/// Interprets one entry of the `multi_fn` mini-language: `params[0]` is
/// either a builtin generator (`dup-…`, `asc-…`, `base-…`, `ins-…`,
/// `static_assert…`, `matchn-…`, transpose/zip/unzip helpers, …) or a
/// function name with `self`/suffix substitutions; the remaining params are
/// its arguments, where `{ … }` denotes a nested sub-call and `name:type`
/// a result re-binding.
fn get_call( in_str: &str, current_name: &str, const_declare: &str, in_t: &[&str; 3], out_t: &str, fixed: &Vec<String>, n: Option<i32>, aarch64: bool, ) -> String { let params: Vec<_> = in_str.split(',').map(|v| v.trim().to_string()).collect(); assert!(params.len() > 0); let mut fn_name = params[0].clone(); if fn_name == "a" { return String::from("a"); } if fn_name == "transpose-1-in_len" { return transpose1(type_len(in_t[1])).to_string(); } if fn_name == "transpose-2-in_len" { return
transpose2(type_len(in_t[1])).to_string(); } if fn_name == "zip-1-in_len" { return zip1(type_len(in_t[1])).to_string(); } if fn_name == "zip-2-in_len" { return zip2(type_len(in_t[1])).to_string(); } if fn_name == "unzip-1-in_len" { return unzip1(type_len(in_t[1])).to_string(); } if fn_name == "unzip-2-in_len" { return unzip2(type_len(in_t[1])).to_string(); } if fn_name.starts_with("dup") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let len = match &*fn_format[1] { "out_len" => type_len(out_t), "in_len" => type_len(in_t[1]), "in0_len" => type_len(in_t[0]), "halflen" => type_len(in_t[1]) / 2, _ => 0, }; let mut s = format!("{} [", const_declare); for i in 0..len { if i != 0 { s.push_str(", "); } s.push_str(&fn_format[2]); } s.push_str("]"); return s; } if fn_name.starts_with("asc") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let start = match &*fn_format[1] { "0" => 0, "n" => n.unwrap(), "out_len" => type_len(out_t) as i32, "halflen" => (type_len(in_t[1]) / 2) as i32, s => s.parse::<i32>().unwrap(), }; let len = match &*fn_format[2] { "out_len" => type_len(out_t), "in_len" => type_len(in_t[1]), "in0_len" => type_len(in_t[0]), "halflen" => type_len(in_t[1]) / 2, _ => 0, }; return asc(start, len); } if fn_name.starts_with("base") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); assert_eq!(fn_format.len(), 3); let mut s = format!("<const {}: i32> [", &fn_format[2]); let base_len = fn_format[1].parse::<usize>().unwrap(); for i in 0..type_len(in_t[1]) / base_len { for j in 0..base_len { if i != 0 || j != 0 { s.push_str(", "); } s.push_str(&format!("{} * {} as u32", base_len, &fn_format[2])); if j != 0 { s.push_str(&format!(" + {}", j)); } } } s.push_str("]"); return s; } if fn_name.starts_with("as") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); assert_eq!(fn_format.len(), 3); let t = match &*fn_format[2] { "in_ttn" => 
type_to_native_type(in_t[1]), _ => String::new(), }; return format!("{} as {}", &fn_format[1], t); } if fn_name.starts_with("ins") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let n = n.unwrap(); let len = match &*fn_format[1] { "out_len" => type_len(out_t), "in_len" => type_len(in_t[1]), "in0_len" => type_len(in_t[0]), _ => 0, }; let offset = match &*fn_format[2] { "out_len" => type_len(out_t), "in_len" => type_len(in_t[1]), "in0_len" => type_len(in_t[0]), _ => 0, }; let mut s = format!("{} [", const_declare); for i in 0..len { if i != 0 { s.push_str(", "); } if i == n as usize { s.push_str(&format!("{} + {} as u32", offset.to_string(), fn_format[3])); } else { s.push_str(&i.to_string()); } } s.push_str("]"); return s; } if fn_name.starts_with("static_assert_imm") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let len = match &*fn_format[1] { "out_exp_len" => type_exp_len(out_t, 1), "out_bits_exp_len" => type_bits_exp_len(out_t), "in_exp_len" => type_exp_len(in_t[1], 1), "in_bits_exp_len" => type_bits_exp_len(in_t[1]), "in0_exp_len" => type_exp_len(in_t[0], 1), "in1_exp_len" => type_exp_len(in_t[1], 1), "in2_exp_len" => type_exp_len(in_t[2], 1), "in2_rot" => type_exp_len(in_t[2], 2), "in2_dot" => type_exp_len(in_t[2], 4), _ => 0, }; if len == 0 { return format!( r#"static_assert!({} : i32 where {} == 0);"#, fn_format[2], fn_format[2] ); } else { return format!(r#"static_assert_imm{}!({});"#, len, fn_format[2]); } } if fn_name.starts_with("static_assert") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let lim1 = if fn_format[2] == "bits" { type_bits(in_t[1]).to_string() } else if fn_format[2] == "halfbits" { (type_bits(in_t[1]) / 2).to_string() } else { fn_format[2].clone() }; let lim2 = if fn_format[3] == "bits" { type_bits(in_t[1]).to_string() } else if fn_format[3] == "halfbits" { (type_bits(in_t[1]) / 2).to_string() } else { fn_format[3].clone() }; if lim1 == lim2 
{ return format!( r#"static_assert!({} : i32 where {} == {});"#, fn_format[1], fn_format[1], lim1 ); } else { return format!( r#"static_assert!({} : i32 where {} >= {} && {} <= {});"#, fn_format[1], fn_format[1], lim1, fn_format[1], lim2 ); } } if fn_name.starts_with("fix_right_shift_imm") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let lim = if fn_format[2] == "bits" { type_bits(in_t[1]).to_string() } else { fn_format[2].clone() }; let fixed = if in_t[1].starts_with('u') { format!("return vdup{nself}(0);", nself = type_to_n_suffix(in_t[1])) } else { (lim.parse::<i32>().unwrap() - 1).to_string() }; return format!( r#"let {name}: i32 = if {const_name} == {upper} {{ {fixed} }} else {{ N }};"#, name = fn_format[1].to_lowercase(), const_name = fn_format[1], upper = lim, fixed = fixed, ); } if fn_name.starts_with("matchn") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); let len = match &*fn_format[1] { "out_exp_len" => type_exp_len(out_t, 1), "in_exp_len" => type_exp_len(in_t[1], 1), "in0_exp_len" => type_exp_len(in_t[0], 1), _ => 0, }; let mut call = format!("match {} & 0b{} {{\n", &fn_format[2], "1".repeat(len)); let mut sub_call = String::new(); for p in 1..params.len() { if !sub_call.is_empty() { sub_call.push_str(", "); } sub_call.push_str(&params[p]); } for i in 0..(2u32.pow(len as u32) as usize) { let sub_match = format!( " {} => {},\n", i, get_call( &sub_call, current_name, const_declare, in_t, out_t, fixed, Some(i as i32), aarch64 ) ); call.push_str(&sub_match); } call.push_str(" _ => unreachable_unchecked(),\n }"); return call; } let mut re: Option<(String, String)> = None; let mut param_str = String::new(); let mut i = 1; while i < params.len() { let s = &params[i]; if s.starts_with('{') { let mut sub_fn = String::new(); let mut parentheses = 0; while i < params.len() { if !sub_fn.is_empty() { sub_fn.push_str(", "); } sub_fn.push_str(&params[i]); let l = params[i].len(); for j in 0..l { if 
&params[i][j..j + 1] == "{" { parentheses += 1; } else { break; } } for j in 0..l { if &params[i][l - j - 1..l - j] == "}" { parentheses -= 1; } else { break; } } if parentheses == 0 { break; } i += 1; } let sub_call = get_call( &sub_fn[1..sub_fn.len() - 1], current_name, const_declare, in_t, out_t, fixed, n.clone(), aarch64, ); if !param_str.is_empty() { param_str.push_str(", "); } param_str.push_str(&sub_call); } else if s.contains(':') { let re_params: Vec<_> = s.split(':').map(|v| v.to_string()).collect(); if re_params[1] == "" { re = Some((re_params[0].clone(), in_t[1].to_string())); } else if re_params[1] == "in_t" { re = Some((re_params[0].clone(), in_t[1].to_string())); } else if re_params[1] == "signed" { re = Some((re_params[0].clone(), type_to_signed(in_t[1]))); } else if re_params[1] == "unsigned" { re = Some((re_params[0].clone(), type_to_unsigned(in_t[1]))); } else if re_params[1] == "in_t0" { re = Some((re_params[0].clone(), in_t[0].to_string())); } else if re_params[1] == "in_t1" { re = Some((re_params[0].clone(), in_t[1].to_string())); } else if re_params[1] == "out_t" { re = Some((re_params[0].clone(), out_t.to_string())); } else if re_params[1] == "half" { re = Some((re_params[0].clone(), type_to_half(in_t[1]).to_string())); } else if re_params[1] == "in_ntt" { re = Some(( re_params[0].clone(), native_type_to_type(in_t[1]).to_string(), )); } else if re_params[1] == "in_long_ntt" { re = Some(( re_params[0].clone(), native_type_to_long_type(in_t[1]).to_string(), )); } else if re_params[1] == "out_ntt" { re = Some((re_params[0].clone(), native_type_to_type(out_t).to_string())); } else if re_params[1] == "out_long_ntt" { re = Some(( re_params[0].clone(), native_type_to_long_type(out_t).to_string(), )); } else { re = Some((re_params[0].clone(), re_params[1].clone())); } } else { if !param_str.is_empty() { param_str.push_str(", "); } param_str.push_str(s); } i += 1; } if fn_name == "fixed" { let (re_name, re_type) = re.unwrap(); let fixed: Vec<String> 
= fixed.iter().take(type_len(in_t[1])).cloned().collect(); return format!(r#"let {}{};"#, re_name, values(&re_type, &fixed)); } if fn_name == "fixed-half-right" { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[1])).cloned().collect(); let half = fixed[type_len(in_t[1]) / 2..] .iter() .fold(String::new(), |mut s, fix| { s.push_str(fix); s.push_str(", "); s }); return format!(r#"[{}]"#, &half[..half.len() - 2]); } if fn_name == "a - b" { return fn_name; } if fn_name == "-a" { return fn_name; } if fn_name.contains('-') { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); assert_eq!(fn_format.len(), 3); fn_name = if fn_format[0] == "self" { current_name.to_string() } else { fn_format[0].clone() }; if fn_format[1] == "self" { fn_name.push_str(type_to_suffix(in_t[1])); } else if fn_format[1] == "nself" { fn_name.push_str(type_to_n_suffix(in_t[1])); } else if fn_format[1] == "nselfvfp4" { fn_name.push_str(type_to_n_suffix(in_t[1])); if !aarch64 { fn_name.push_str("_vfp4"); } } else if fn_format[1] == "out" { fn_name.push_str(type_to_suffix(out_t)); } else if fn_format[1] == "in0" { fn_name.push_str(type_to_suffix(in_t[0])); } else if fn_format[1] == "in2" { fn_name.push_str(type_to_suffix(in_t[2])); } else if fn_format[1] == "in2lane" { fn_name.push_str(&type_to_lane_suffixes(out_t, in_t[2], false)); } else if fn_format[1] == "outlane" { fn_name.push_str(&type_to_lane_suffixes(out_t, in_t[2], true)); } else if fn_format[1] == "signed" { fn_name.push_str(type_to_suffix(&type_to_signed(&String::from(in_t[1])))); } else if fn_format[1] == "outsigned" { fn_name.push_str(type_to_suffix(&type_to_signed(&String::from(out_t)))); } else if fn_format[1] == "outsignednox" { fn_name.push_str(&type_to_suffix(&type_to_sub_type(&type_to_signed( &String::from(out_t), )))); } else if fn_format[1] == "in1signednox" { fn_name.push_str(&type_to_suffix(&type_to_sub_type(&type_to_signed( &String::from(in_t[1]), )))); } else if fn_format[1] == 
"outsigneddupnox" { fn_name.push_str(&type_to_dup_suffix(&type_to_sub_type(&type_to_signed( &String::from(out_t), )))); } else if fn_format[1] == "outsignedlanenox" { fn_name.push_str(&type_to_lane_suffix(&type_to_sub_type(&type_to_signed( &String::from(out_t), )))); } else if fn_format[1] == "in1signedlanenox" { fn_name.push_str(&type_to_lane_suffix(&type_to_sub_type(&type_to_signed( &String::from(in_t[1]), )))); } else if fn_format[1] == "unsigned" { fn_name.push_str(type_to_suffix(&type_to_unsigned(in_t[1]))); } else if fn_format[1] == "doubleself" { fn_name.push_str(&type_to_double_suffixes(out_t, in_t[1])); } else if fn_format[1] == "noq_doubleself" { fn_name.push_str(&type_to_noq_double_suffixes(out_t, in_t[1])); } else if fn_format[1] == "noqself" { fn_name.push_str(type_to_noq_suffix(in_t[1])); } else if fn_format[1] == "noqsigned" { fn_name.push_str(type_to_noq_suffix(&type_to_signed(&String::from(in_t[1])))); } else if fn_format[1] == "nosuffix" { } else if fn_format[1] == "in_len" { fn_name.push_str(&type_len(in_t[1]).to_string()); } else if fn_format[1] == "in0_len" { fn_name.push_str(&type_len(in_t[0]).to_string()); } else if fn_format[1] == "out_len" { fn_name.push_str(&type_len(out_t).to_string()); } else if fn_format[1] == "halflen" { fn_name.push_str(&(type_len(in_t[1]) / 2).to_string()); } else if fn_format[1] == "nout" { fn_name.push_str(type_to_n_suffix(out_t)); } else if fn_format[1] == "nin0" { fn_name.push_str(type_to_n_suffix(in_t[0])); } else if fn_format[1] == "nsigned" { fn_name.push_str(type_to_n_suffix(&type_to_signed(&String::from(in_t[1])))); } else if fn_format[1] == "in_ntt" { fn_name.push_str(type_to_suffix(native_type_to_type(in_t[1]))); } else if fn_format[1] == "out_ntt" { fn_name.push_str(type_to_suffix(native_type_to_type(out_t))); } else if fn_format[1] == "rot" { fn_name = type_to_rot_suffix(&fn_name, type_to_suffix(out_t)); } else { fn_name.push_str(&fn_format[1]); }; if fn_format[2] == "ext" { fn_name.push_str("_"); } else 
if fn_format[2] == "noext" { } else if fn_format[2].starts_with("<") { assert!(fn_format[2].ends_with(">")); let types: Vec<_> = fn_format[2][1..fn_format[2].len() - 1] .split(' ') .map(|v| v.to_string()) .collect(); assert_eq!(types.len(), 2); let type1 = if types[0] == "element_t" { type_to_native_type(in_t[1]) } else { String::from(&types[0]) }; let type2 = if types[1] == "element_t" { type_to_native_type(in_t[1]) } else { String::from(&types[1]) }; fn_name.push_str(&format!("::<{}, {}>", &type1, &type2)); } else { fn_name.push_str(&fn_format[2]); } } if param_str.is_empty() { return fn_name.replace("out_t", out_t); } let fn_str = if let Some((re_name, re_type)) = re.clone() { format!( r#"let {}: {} = {}({});"#, re_name, re_type, fn_name, param_str ) } else if fn_name.starts_with("*") { format!(r#"{} = {};"#, fn_name, param_str) } else { format!(r#"{}({})"#, fn_name, param_str) }; return fn_str; } fn main() -> io::Result<()> { let args: Vec<String> = env::args().collect(); let in_file = args.get(1).cloned().unwrap_or_else(|| IN.to_string()); let f = File::open(in_file).expect("Failed to open neon.spec"); let f = BufReader::new(f); let mut current_comment = String::new(); let mut current_name: Option<String> = None; let mut current_fn: Option<String> = None; let mut current_arm: Option<String> = None; let mut current_aarch64: Option<String> = None; let mut link_arm: Option<String> = None; let mut link_aarch64: Option<String> = None; let mut const_arm: Option<String> = None; let mut const_aarch64: Option<String> = None; let mut constn: Option<String> = None; let mut para_num = 2; let mut suffix: Suffix = Normal; let mut a: Vec<String> = Vec::new(); let mut b: Vec<String> = Vec::new(); let mut c: Vec<String> = Vec::new(); let mut n: Option<String> = None; let mut fixed: Vec<String> = Vec::new(); let mut current_tests: Vec<( Vec<String>, Vec<String>, Vec<String>, Option<String>, Vec<String>, )> = Vec::new(); let mut multi_fn: Vec<String> = Vec::new(); let mut 
target: TargetFeature = Default; let mut fn_type: Fntype = Fntype::Normal; let mut separate = false; // // THIS FILE IS GENERATED FORM neon.spec DO NOT CHANGE IT MANUALLY // let mut out_arm = String::from( r#"// This code is automatically generated. DO NOT MODIFY. // // Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file: // // ``` // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec // ``` use super::*; #[cfg(test)] use stdarch_test::assert_instr; "#, ); let mut tests_arm = String::from( r#" #[cfg(test)] #[allow(overflowing_literals)] mod test { use super::*; use crate::core_arch::simd::*; use std::mem::transmute; use stdarch_test::simd_test; "#, ); // // THIS FILE IS GENERATED FORM neon.spec DO NOT CHANGE IT MANUALLY // let mut out_aarch64 = String::from( r#"// This code is automatically generated. DO NOT MODIFY. // // Instead, modify `crates/stdarch-gen/neon.spec` and run the following command to re-generate this file: // // ``` // OUT_DIR=`pwd`/crates/core_arch cargo run -p stdarch-gen -- crates/stdarch-gen/neon.spec // ``` use super::*; #[cfg(test)] use stdarch_test::assert_instr; "#, ); let mut tests_aarch64 = String::from( r#" #[cfg(test)] mod test { use super::*; use crate::core_arch::simd::*; use std::mem::transmute; use stdarch_test::simd_test; "#, ); for line in f.lines() { let line = line.unwrap(); if line.is_empty() { continue; } if line.starts_with("/// ") { current_comment = line; current_name = None; current_fn = None; current_arm = None; current_aarch64 = None; link_aarch64 = None; link_arm = None; const_aarch64 = None; const_arm = None; current_tests = Vec::new(); constn = None; para_num = 2; suffix = Normal; a = Vec::new(); b = Vec::new(); c = Vec::new(); fixed = Vec::new(); n = None; multi_fn = Vec::new(); target = Default; fn_type = Fntype::Normal; separate = false; } else if line.starts_with("//") { } else if line.starts_with("name = ") { current_name = 
Some(String::from(&line[7..])); } else if line.starts_with("fn = ") { current_fn = Some(String::from(&line[5..])); } else if line.starts_with("multi_fn = ") { multi_fn.push(String::from(&line[11..])); } else if line.starts_with("constn = ") { constn = Some(String::from(&line[9..])); } else if line.starts_with("arm = ") { current_arm = Some(String::from(&line[6..])); } else if line.starts_with("aarch64 = ") { current_aarch64 = Some(String::from(&line[10..])); } else if line.starts_with("double-suffixes") { suffix = Double; } else if line.starts_with("no-q") { suffix = NoQ; } else if line.starts_with("noq-double-suffixes") { suffix = NoQDouble; } else if line.starts_with("n-suffix") { suffix = NSuffix; } else if line.starts_with("double-n-suffixes") { suffix = DoubleN; } else if line.starts_with("out-n-suffix") { suffix = OutNSuffix; } else if line.starts_with("noq-n-suffix") { suffix = NoQNSuffix; } else if line.starts_with("out-suffix") { suffix = OutSuffix; } else if line.starts_with("out-nox") { suffix = OutNox; } else if line.starts_with("in1-nox") { suffix = In1Nox; } else if line.starts_with("out-dup-nox") { suffix = OutDupNox; } else if line.starts_with("out-lane-nox") { suffix = OutLaneNox; } else if line.starts_with("in1-lane-nox") { suffix = In1LaneNox; } else if line.starts_with("lane-suffixes") { suffix = Lane; } else if line.starts_with("in2-suffix") { suffix = In2; } else if line.starts_with("in2-lane-suffixes") { suffix = In2Lane; } else if line.starts_with("out-lane-suffixes") { suffix = OutLane; } else if line.starts_with("rot-suffix") { suffix = Rot; } else if line.starts_with("rot-lane-suffixes") { suffix = RotLane; } else if line.starts_with("a = ") { a = line[4..].split(',').map(|v| v.trim().to_string()).collect(); } else if line.starts_with("b = ") { b = line[4..].split(',').map(|v| v.trim().to_string()).collect(); } else if line.starts_with("c = ") { c = line[4..].split(',').map(|v| v.trim().to_string()).collect(); } else if 
line.starts_with("n = ") { n = Some(String::from(&line[4..])); } else if line.starts_with("fixed = ") { fixed = line[8..].split(',').map(|v| v.trim().to_string()).collect(); } else if line.starts_with("validate ") { let e = line[9..].split(',').map(|v| v.trim().to_string()).collect(); current_tests.push((a.clone(), b.clone(), c.clone(), n.clone(), e)); } else if line.starts_with("link-aarch64 = ") { link_aarch64 = Some(String::from(&line[15..])); } else if line.starts_with("const-aarch64 = ") { const_aarch64 = Some(String::from(&line[16..])); } else if line.starts_with("link-arm = ") { link_arm = Some(String::from(&line[11..])); } else if line.starts_with("const-arm = ") { const_arm = Some(String::from(&line[12..])); } else if line.starts_with("load_fn") { fn_type = Fntype::Load; } else if line.starts_with("store_fn") { fn_type = Fntype::Store; } else if line.starts_with("arm-aarch64-separate") { separate = true; } else if line.starts_with("target = ") { target = match Some(String::from(&line[9..])) { Some(input) => match input.as_str() { "v7" => ArmV7, "vfp4" => Vfp4, "fp-armv8" => FPArmV8, "aes" => AES, "fcma" => FCMA, "dotprod" => Dotprod, "i8mm" => I8MM, "sha3" => SHA3, "rdm" => RDM, "sm4" => SM4, "frintts" => FTTS, _ => Default, }, _ => Default, } } else if line.starts_with("generate ") { let line = &line[9..]; let types: Vec<String> = line .split(',') .map(|v| v.trim().to_string()) .flat_map(|v| match v.as_str() { "uint*_t" => UINT_TYPES.iter().map(|v| v.to_string()).collect(), "uint64x*_t" => UINT_TYPES_64.iter().map(|v| v.to_string()).collect(), "int*_t" => INT_TYPES.iter().map(|v| v.to_string()).collect(), "int64x*_t" => INT_TYPES_64.iter().map(|v| v.to_string()).collect(), "float*_t" => FLOAT_TYPES.iter().map(|v| v.to_string()).collect(), "float64x*_t" => FLOAT_TYPES_64.iter().map(|v| v.to_string()).collect(), _ => vec![v], }) .collect(); for line in types { let spec: Vec<&str> = line.split(':').map(|e| e.trim()).collect(); let in_t: [&str; 3]; let out_t; 
if spec.len() == 1 { in_t = [spec[0], spec[0], spec[0]]; out_t = spec[0]; } else if spec.len() == 2 { in_t = [spec[0], spec[0], spec[0]]; out_t = spec[1]; } else if spec.len() == 3 { in_t = [spec[0], spec[1], spec[1]]; out_t = spec[2]; } else if spec.len() == 4 { in_t = [spec[0], spec[1], spec[2]]; out_t = spec[3]; } else { panic!("Bad spec: {}", line) } if b.len() == 0 { if matches!(fn_type, Fntype::Store) { para_num = 2; } else { para_num = 1; } } else if c.len() != 0 { para_num = 3; } let current_name = current_name.clone().unwrap(); if let Some(current_arm) = current_arm.clone() { let (function, test) = gen_arm( &current_comment, &current_fn, &current_name, &current_arm, &link_arm, &current_aarch64, &link_aarch64, &const_arm, &const_aarch64, &constn, &in_t, &out_t, &current_tests, suffix, para_num, target, &fixed, &multi_fn, fn_type, separate, ); out_arm.push_str(&function); tests_arm.push_str(&test); } else { let (function, test) = gen_aarch64( &current_comment, &current_fn, &current_name, &current_aarch64, &link_aarch64, &const_aarch64, &constn, &in_t, &out_t, &current_tests, suffix, para_num, target, &fixed, &multi_fn, fn_type, ); out_aarch64.push_str(&function); tests_aarch64.push_str(&test); } } } } tests_arm.push('}'); tests_arm.push('\n'); tests_aarch64.push('}'); tests_aarch64.push('\n'); let arm_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap_or("crates/core_arch".to_string())) .join("src") .join("arm_shared") .join("neon"); std::fs::create_dir_all(&arm_out_path)?; let mut file_arm = File::create(arm_out_path.join(ARM_OUT))?; file_arm.write_all(out_arm.as_bytes())?; file_arm.write_all(tests_arm.as_bytes())?; let aarch64_out_path: PathBuf = PathBuf::from(env::var("OUT_DIR").unwrap_or("crates/core_arch".to_string())) .join("src") .join("aarch64") .join("neon"); std::fs::create_dir_all(&aarch64_out_path)?; let mut file_aarch = File::create(aarch64_out_path.join(AARCH64_OUT))?; file_aarch.write_all(out_aarch64.as_bytes())?; 
file_aarch.write_all(tests_aarch64.as_bytes())?; /* if let Err(e) = Command::new("rustfmt") .arg(&arm_out_path) .arg(&aarch64_out_path) .status() { eprintln!("Could not format `{}`: {}", arm_out_path.to_str().unwrap(), e); eprintln!("Could not format `{}`: {}", aarch64_out_path.to_str().unwrap(), e); }; */ Ok(()) }
34.545294
170
0.43001
e55534e36fd2ed08708b077df16224a24a4f1b8e
7,985
#![allow(unused)]
//! A map from integer indices to data, stored per *range* of indices.
//!
//! Instead of keeping one entry per index, the map associates whole ranges
//! with a value; all public APIs therefore operate on `(offset, len)` windows
//! rather than on single integers. Ranges are split on demand (for instance
//! when `[0, 5)` is bound to `X` and `[1, 2)` is later mutated).
//! Callers must not rely on whether adjacent ranges are coalesced, even
//! though the iteration APIs make that observable.

use std::collections::BTreeMap;
use std::ops;

/// Maps disjoint `Range`s of `u64` indices to values of type `T`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RangeMap<T> {
    map: BTreeMap<Range, T>,
}

/// A half-open interval `[start, end)` used as the key of the `BTreeMap`.
///
/// The derived `Ord` compares `start` first and falls back to `end` only on
/// ties. That is exactly what the range queries below need: a B-tree range
/// query selects all stored `Range`s whose `start` could reach into the
/// queried window, while `end` stays irrelevant for the ordering and is only
/// inspected by the overlap check afterwards.
/// This scheme relies on `end > start` — never construct a violating range!
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Range {
    start: u64,
    end: u64, // Invariant: end > start
}

impl Range {
    /// Builds the B-tree key window covering every stored range that could
    /// intersect `[offset, offset + len)`.
    ///
    /// Soundness requirement: every stored range intersecting the argument
    /// window must fall inside the returned key range.
    fn range(offset: u64, len: u64) -> ops::Range<Range> {
        assert!(len > 0);
        // Smallest key (w.r.t. the derived order) that still covers `offset`.
        let first = Range {
            start: 0,
            end: offset + 1,
        };
        // Smallest valid key lying entirely past `offset + len`.
        let past_end = Range {
            start: offset + len,
            end: offset + len + 1,
        };
        first..past_end
    }

    /// Returns whether this range intersects the window `[offset, offset + len)`.
    ///
    /// The lower-bound comparison is deliberately permissive (`<=` rather than
    /// `<`): a key starting exactly at `offset + len` would also pass, but such
    /// keys never reach this check because `Range::range` excludes them from
    /// the B-tree query window.
    fn overlaps(&self, offset: u64, len: u64) -> bool {
        assert!(len > 0);
        self.start <= offset + len && offset < self.end
    }
}

impl<T> RangeMap<T> {
    /// Creates an empty map.
    pub fn new() -> RangeMap<T> {
        RangeMap {
            map: BTreeMap::new(),
        }
    }

    /// Iterates over all `(range, value)` pairs intersecting
    /// `[offset, offset + len)`.
    fn iter_with_range<'a>(
        &'a self,
        offset: u64,
        len: u64,
    ) -> impl Iterator<Item = (&'a Range, &'a T)> + 'a {
        assert!(len > 0);
        self.map
            .range(Range::range(offset, len))
            .filter(move |&(range, _)| range.overlaps(offset, len))
    }

    /// Iterates over the values intersecting `[offset, offset + len)`.
    pub fn iter<'a>(&'a self, offset: u64, len: u64) -> impl Iterator<Item = &'a T> + 'a {
        self.iter_with_range(offset, len).map(|(_, v)| v)
    }

    /// Ensures that `offset` is a boundary between stored ranges: if an entry
    /// spans across `offset`, it is replaced by two entries carrying clones of
    /// the same value.
    fn split_entry_at(&mut self, offset: u64)
    where
        T: Clone,
    {
        let range = match self.iter_with_range(offset, 1).next() {
            None => return, // no entry covers this position
            Some((&r, _)) => r,
        };
        assert!(
            range.start <= offset && range.end > offset,
            "We got a range that doesn't even contain what we asked for."
        );
        if range.start == offset {
            // Already split exactly here; nothing to do.
            return;
        }
        // Replace the entry by its two halves, duplicating the value.
        let data = self.map.remove(&range).unwrap();
        let left = Range {
            start: range.start,
            end: offset,
        };
        let right = Range {
            start: offset,
            end: range.end,
        };
        assert!(self.map.insert(left, data.clone()).is_none());
        assert!(self.map.insert(right, data).is_none());
    }

    /// Mutable iteration over *all* stored values, regardless of position.
    pub fn iter_mut_all<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> + 'a {
        self.map.values_mut()
    }

    /// Provide mutable iteration over everything in the given range. As a
    /// side-effect, entries that are only partially hit by the window are
    /// split first, so that mutating them affects only the given range.
    pub fn iter_mut_with_gaps<'a>(
        &'a mut self,
        offset: u64,
        len: u64,
    ) -> impl Iterator<Item = &'a mut T> + 'a
    where
        T: Clone,
    {
        assert!(len > 0);
        // Make the window boundaries coincide with entry boundaries.
        self.split_entry_at(offset);
        self.split_entry_at(offset + len);
        // Now mutable iteration cannot leak outside the window.
        self.map
            .range_mut(Range::range(offset, len))
            .filter_map(move |(&range, data)| {
                if !range.overlaps(offset, len) {
                    return None; // skip this one
                }
                // After splitting, every overlapping entry must lie entirely
                // inside the window.
                assert!(
                    offset <= range.start && offset + len >= range.end,
                    "The splitting went wrong"
                );
                Some(data)
            })
    }

    /// Like `iter_mut_with_gaps` (same splitting side-effects), but first
    /// fills any uncovered position in `[offset, offset + len)` with the
    /// default value. This doubles as the insertion primitive.
    pub fn iter_mut<'a>(&'a mut self, offset: u64, len: u64) -> impl Iterator<Item = &'a mut T> + 'a
    where
        T: Clone + Default,
    {
        // Pass 1: find the uncovered holes inside the window.
        let mut holes = Vec::new();
        let mut cursor = offset;
        for (range, _) in self.iter_with_range(offset, len) {
            if cursor < range.start {
                holes.push(Range {
                    start: cursor,
                    end: range.start,
                });
            }
            cursor = range.end;
        }
        if cursor < offset + len {
            holes.push(Range {
                start: cursor,
                end: offset + len,
            });
        }
        // Pass 2: plug each hole with the default value.
        for hole in holes {
            assert!(self.map.insert(hole, Default::default()).is_none());
        }
        // Pass 3: hand out mutable references as usual.
        self.iter_mut_with_gaps(offset, len)
    }

    /// Keeps only the entries whose value satisfies `f`.
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // Collect the keys to drop first; we cannot remove while iterating.
        let doomed: Vec<Range> = self
            .map
            .iter()
            .filter(|&(_, data)| !f(data))
            .map(|(&range, _)| range)
            .collect();
        for range in doomed {
            self.map.remove(&range);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Reads the map at each position of the window and collects the results.
    fn to_vec<T: Copy>(map: &RangeMap<T>, offset: u64, len: u64) -> Vec<T> {
        (offset..offset + len)
            .map(|pos| *map.iter(pos, 1).next().unwrap())
            .collect()
    }

    #[test]
    fn basic_insert() {
        let mut map = RangeMap::<i32>::new();
        // Write through the inserting iterator...
        for v in map.iter_mut(10, 1) {
            *v = 42;
        }
        // ...and read it back.
        assert_eq!(to_vec(&map, 10, 1), vec![42]);
    }

    #[test]
    fn gaps() {
        let mut map = RangeMap::<i32>::new();
        for v in map.iter_mut(11, 1) {
            *v = 42;
        }
        for v in map.iter_mut(15, 1) {
            *v = 42;
        }

        // This request has to fill three separate holes with defaults.
        for v in map.iter_mut(10, 10) {
            if *v != 42 {
                *v = 23;
            }
        }

        assert_eq!(
            to_vec(&map, 10, 10),
            vec![23, 42, 23, 23, 23, 42, 23, 23, 23, 23]
        );
        assert_eq!(to_vec(&map, 13, 5), vec![23, 23, 42, 23, 23]);
    }
}
31.561265
116
0.50908
f81ca4b93e45f6bb080ad1dee51d3cb1123e75cb
102,495
#![doc = "generated by AutoRust"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] #![allow(clippy::redundant_clone)] use super::models; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new( endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, ) -> Self { let endpoint = endpoint.into(); let pipeline = 
azure_core::Pipeline::new( option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn operations(&self) -> operations::Client { operations::Client(self.clone()) } pub fn signal_r(&self) -> signal_r::Client { signal_r::Client(self.clone()) } pub fn signal_r_private_endpoint_connections(&self) -> signal_r_private_endpoint_connections::Client { signal_r_private_endpoint_connections::Client(self.clone()) } pub fn signal_r_private_link_resources(&self) -> signal_r_private_link_resources::Client { signal_r_private_link_resources::Client(self.clone()) } pub fn signal_r_shared_private_link_resources(&self) -> signal_r_shared_private_link_resources::Client { signal_r_shared_private_link_resources::Client(self.clone()) } pub fn usages(&self) -> usages::Client { usages::Client(self.clone()) } } pub mod operations { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::OperationList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!("{}/providers/Microsoft.SignalRService/operations", this.client.endpoint(),); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = 
url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::OperationList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } } pub mod signal_r { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn check_name_availability( &self, location: impl Into<String>, parameters: impl Into<models::NameAvailabilityParameters>, subscription_id: impl Into<String>, ) -> check_name_availability::Builder { check_name_availability::Builder { client: self.0.clone(), location: location.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder { list_by_subscription::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), } } pub fn list_by_resource_group( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn create_or_update( &self, parameters: impl Into<models::SignalRResource>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> create_or_update::Builder { 
create_or_update::Builder { client: self.0.clone(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn update( &self, parameters: impl Into<models::SignalRResource>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn delete( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn list_keys( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> list_keys::Builder { list_keys::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn regenerate_key( &self, parameters: impl Into<models::RegenerateKeyParameters>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> regenerate_key::Builder { regenerate_key::Builder { client: self.0.clone(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn restart( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> restart::Builder { restart::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: 
resource_group_name.into(), resource_name: resource_name.into(), } } } pub mod check_name_availability { use super::models; use azure_core::error::ResultExt; type Response = models::NameAvailability; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) location: String, pub(crate) parameters: models::NameAvailabilityParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.SignalRService/locations/{}/checkNameAvailability", this.client.endpoint(), &this.subscription_id, &this.location ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::NameAvailability = serde_json::from_slice(&rsp_body)?; 
Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod list_by_subscription { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRResourceList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.SignalRService/signalR", this.client.endpoint(), &this.subscription_id ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
// NOTE(review): generated paging code. First the `None` (first-page) branch of
// `list_by_subscription` (plain GET with api-version appended), closing its match on status and
// wrapping the closure in azure_core::Pageable. Then `list_by_resource_group` starts: same
// pageable GET pattern scoped to .../resourceGroups/{resource_group_name}/.../signalR. The line
// break inside the "build request" context string below is part of the literal as emitted —
// do not normalize it.
} None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResourceList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } pub mod list_by_resource_group { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRResourceList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR", this.client.endpoint(), &this.subscription_id, &this.resource_group_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build 
request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
// NOTE(review): generated code for three operations on a single SignalR resource:
// `get` (GET, 200 -> models::SignalRResource); `create_or_update` (PUT with JSON body,
// 200/201 parsed as SignalRResource, bare 202 Accepted — long-running operation, only the
// first response is fetched per the #[doc] note); and the start of `update` (PATCH with JSON
// body). Unlisted statuses map to ErrorKind::HttpResponse with the numeric status and no
// error_code. Code is byte-identical to the generator output.
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResourceList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } pub mod get { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRResource; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await 
.context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResource = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod create_or_update { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200(models::SignalRResource), Created201(models::SignalRResource), Accepted202, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) parameters: models::SignalRResource, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", 
"2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResource = serde_json::from_slice(&rsp_body)?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResource = serde_json::from_slice(&rsp_body)?; Ok(Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod update { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200(models::SignalRResource), Accepted202, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) parameters: models::SignalRResource, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut 
url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRResource = serde_json::from_slice(&rsp_body)?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod delete { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, 
// NOTE(review): generated code for `delete` (DELETE; 200/202/204 map to unit enum variants,
// long-running per the #[doc] note) and `list_keys` (POST to .../listKeys with an empty body
// and an explicit Content-Length: 0 header; 200 body parsed as models::SignalRKeys), followed
// by the start of `regenerate_key`'s Builder fields. Byte-identical to generator output.
azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod list_keys { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRKeys; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, 
azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/listKeys", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRKeys = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod regenerate_key { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRKeys; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) parameters: models::RegenerateKeyParameters, pub(crate) subscription_id: String, pub(crate) 
// NOTE(review): generated code for `regenerate_key` (POST to .../regenerateKey with a JSON
// RegenerateKeyParameters body; note the success arm matches 202 ACCEPTED and still parses the
// body as models::SignalRKeys — that is what the generator emitted from the service spec,
// TODO confirm against the swagger before changing) and most of `restart` (POST to
// .../restart with empty body and Content-Length: 0; 202/204 map to unit variants).
resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/regenerateKey", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRKeys = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod restart { use 
super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/restart", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => 
// NOTE(review): closes `restart`'s error arm, then defines the `usages` operation group: a
// Client wrapper plus `list`, a pageable GET over
// .../locations/{location}/usages returning models::SignalRUsageList pages. Same
// continuation-token pattern as the other pagers (rebase URL via Url::join, append api-version
// only if absent). The line break inside the format!("Bearer ... string below is part of the
// literal as emitted — do not normalize it.
Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } } pub mod usages { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self, location: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder { list::Builder { client: self.0.clone(), location: location.into(), subscription_id: subscription_id.into(), } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::SignalRUsageList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) location: String, pub(crate) subscription_id: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.SignalRService/locations/{}/usages", this.client.endpoint(), &this.subscription_id, &this.location ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer 
{}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SignalRUsageList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } } pub mod signal_r_private_endpoint_connections { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn get( &self, private_endpoint_connection_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), private_endpoint_connection_name: private_endpoint_connection_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn update( &self, private_endpoint_connection_name: impl Into<String>, parameters: impl Into<models::PrivateEndpointConnection>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), private_endpoint_connection_name: private_endpoint_connection_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn delete( &self, private_endpoint_connection_name: impl Into<String>, 
// NOTE(review): completes the `delete` builder constructor on the private-endpoint-connections
// Client, then defines `list`: a pageable GET over
// .../signalR/{resource_name}/privateEndpointConnections returning
// models::PrivateEndpointConnectionList. The `& format ! (...)` token spacing is how the
// generator emitted this over-long line; left verbatim.
subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), private_endpoint_connection_name: private_endpoint_connection_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::PrivateEndpointConnectionList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/privateEndpointConnections" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . 
resource_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
// NOTE(review): generated per-connection operations on
// .../privateEndpointConnections/{private_endpoint_connection_name}: `get` (GET, 200 ->
// models::PrivateEndpointConnection), `update` (PUT with JSON PrivateEndpointConnection body,
// 200 -> same model) and `delete` (DELETE; 200/202/204 unit variants, long-running per the
// #[doc] note). Closes with the opening of the next operation-group module. Byte-identical to
// generator output, including the spaced `& format ! (...)` token streams.
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::PrivateEndpointConnectionList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } pub mod get { use super::models; use azure_core::error::ResultExt; type Response = models::PrivateEndpointConnection; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) private_endpoint_connection_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/privateEndpointConnections/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . 
private_endpoint_connection_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod update { use super::models; use azure_core::error::ResultExt; type Response = models::PrivateEndpointConnection; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) private_endpoint_connection_name: String, pub(crate) parameters: models::PrivateEndpointConnection, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! 
("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/privateEndpointConnections/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . private_endpoint_connection_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod delete { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) private_endpoint_connection_name: String, pub(crate) 
subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/privateEndpointConnections/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . private_endpoint_connection_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } } pub mod 
signal_r_private_link_resources { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::PrivateLinkResourceList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/privateLinkResources", this.client.endpoint(), &this.subscription_id, &this.resource_group_name, &this.resource_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer 
token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::PrivateLinkResourceList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } } pub mod signal_r_shared_private_link_resources { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn get( &self, shared_private_link_resource_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), shared_private_link_resource_name: shared_private_link_resource_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn create_or_update( &self, shared_private_link_resource_name: impl Into<String>, parameters: impl Into<models::SharedPrivateLinkResource>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), shared_private_link_resource_name: shared_private_link_resource_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn delete( &self, 
shared_private_link_resource_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), shared_private_link_resource_name: shared_private_link_resource_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::SharedPrivateLinkResourceList; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/sharedPrivateLinkResources" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . 
resource_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url .join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? 
} }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SharedPrivateLinkResourceList = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } pub mod get { use super::models; use azure_core::error::ResultExt; type Response = models::SharedPrivateLinkResource; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) shared_private_link_resource_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/sharedPrivateLinkResources/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . 
shared_private_link_resource_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SharedPrivateLinkResource = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod create_or_update { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200(models::SharedPrivateLinkResource), Created201(models::SharedPrivateLinkResource), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) shared_private_link_resource_name: String, pub(crate) parameters: models::SharedPrivateLinkResource, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] 
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/sharedPrivateLinkResources/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . shared_private_link_resource_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&this.parameters)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SharedPrivateLinkResource = serde_json::from_slice(&rsp_body)?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::SharedPrivateLinkResource = serde_json::from_slice(&rsp_body)?; Ok(Response::Created201(rsp_value)) } status_code => 
Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod delete { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) shared_private_link_resource_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { #[doc = "only the first response will be fetched as long running operations are not supported yet"] pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.SignalRService/signalR/{}/sharedPrivateLinkResources/{}" , this . client . endpoint () , & this . subscription_id , & this . resource_group_name , & this . resource_name , & this . 
shared_private_link_resource_name) ; let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } }
54.431758
326
0.471623
e8f31e4b1be5b751ed02bf53def591f5e1c51e0c
19,902
use either::Either; use hir::{HasAttrs, ModuleDef, Semantics}; use ide_db::{ defs::{Definition, NameClass, NameRefClass}, RootDatabase, }; use syntax::{ ast, match_ast, AstNode, AstToken, SyntaxKind::*, SyntaxToken, TextSize, TokenAtOffset, T, }; use crate::{ display::TryToNav, doc_links::extract_definitions_from_markdown, runnables::doc_owner_to_def, FilePosition, NavigationTarget, RangeInfo, }; // Feature: Go to Definition // // Navigates to the definition of an identifier. // // |=== // | Editor | Shortcut // // | VS Code | kbd:[F12] // |=== pub(crate) fn goto_definition( db: &RootDatabase, position: FilePosition, ) -> Option<RangeInfo<Vec<NavigationTarget>>> { let sema = Semantics::new(db); let file = sema.parse(position.file_id).syntax().clone(); let original_token = pick_best(file.token_at_offset(position.offset))?; let token = sema.descend_into_macros(original_token.clone()); let parent = token.parent(); if let Some(comment) = ast::Comment::cast(token) { let nav = def_for_doc_comment(&sema, position, &comment)?.try_to_nav(db)?; return Some(RangeInfo::new(original_token.text_range(), vec![nav])); } let nav = match_ast! 
{ match parent { ast::NameRef(name_ref) => { reference_definition(&sema, Either::Right(&name_ref)) }, ast::Name(name) => { let def = NameClass::classify(&sema, &name)?.referenced_or_defined(sema.db); def.try_to_nav(sema.db) }, ast::Lifetime(lt) => if let Some(name_class) = NameClass::classify_lifetime(&sema, &lt) { let def = name_class.referenced_or_defined(sema.db); def.try_to_nav(sema.db) } else { reference_definition(&sema, Either::Left(&lt)) }, _ => return None, } }; Some(RangeInfo::new(original_token.text_range(), nav.into_iter().collect())) } fn def_for_doc_comment( sema: &Semantics<RootDatabase>, position: FilePosition, doc_comment: &ast::Comment, ) -> Option<hir::ModuleDef> { let parent = doc_comment.syntax().parent(); let (link, ns) = extract_positioned_link_from_comment(position, doc_comment)?; let def = doc_owner_to_def(sema, parent)?; match def { Definition::ModuleDef(def) => match def { ModuleDef::Module(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Function(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Adt(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Variant(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Const(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Static(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::Trait(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::TypeAlias(it) => it.resolve_doc_path(sema.db, &link, ns), ModuleDef::BuiltinType(_) => return None, }, Definition::Macro(it) => it.resolve_doc_path(sema.db, &link, ns), Definition::Field(it) => it.resolve_doc_path(sema.db, &link, ns), Definition::SelfType(_) | Definition::Local(_) | Definition::GenericParam(_) | Definition::Label(_) => return None, } } fn extract_positioned_link_from_comment( position: FilePosition, comment: &ast::Comment, ) -> Option<(String, Option<hir::Namespace>)> { let comment_range = comment.syntax().text_range(); let doc_comment = comment.doc_comment()?; let def_links = 
extract_definitions_from_markdown(doc_comment); let (def_link, ns, _) = def_links.iter().min_by_key(|(_, _, def_link_range)| { let matched_position = comment_range.start() + TextSize::from(def_link_range.start as u32); match position.offset.checked_sub(matched_position) { Some(distance) => distance, None => comment_range.end(), } })?; Some((def_link.to_string(), ns.clone())) } fn pick_best(tokens: TokenAtOffset<SyntaxToken>) -> Option<SyntaxToken> { return tokens.max_by_key(priority); fn priority(n: &SyntaxToken) -> usize { match n.kind() { IDENT | INT_NUMBER | LIFETIME_IDENT | T![self] | COMMENT => 2, kind if kind.is_trivia() => 0, _ => 1, } } } pub(crate) fn reference_definition( sema: &Semantics<RootDatabase>, name_ref: Either<&ast::Lifetime, &ast::NameRef>, ) -> Option<NavigationTarget> { let name_kind = name_ref.either( |lifetime| NameRefClass::classify_lifetime(sema, lifetime), |name_ref| NameRefClass::classify(sema, name_ref), )?; let def = name_kind.referenced(sema.db); def.try_to_nav(sema.db) } #[cfg(test)] mod tests { use ide_db::base_db::FileRange; use crate::fixture; fn check(ra_fixture: &str) { let (analysis, position, expected) = fixture::nav_target_annotation(ra_fixture); let mut navs = analysis.goto_definition(position).unwrap().expect("no definition found").info; if navs.len() == 0 { panic!("unresolved reference") } assert_eq!(navs.len(), 1); let nav = navs.pop().unwrap(); assert_eq!(expected, FileRange { file_id: nav.file_id, range: nav.focus_or_full_range() }); } #[test] fn goto_def_for_extern_crate() { check( r#" //- /main.rs crate:main deps:std extern crate std$0; //- /std/lib.rs crate:std // empty //^ file "#, ) } #[test] fn goto_def_for_renamed_extern_crate() { check( r#" //- /main.rs crate:main deps:std extern crate std as abc$0; //- /std/lib.rs crate:std // empty //^ file "#, ) } #[test] fn goto_def_in_items() { check( r#" struct Foo; //^^^ enum E { X(Foo$0) } "#, ); } #[test] fn goto_def_at_start_of_item() { check( r#" struct Foo; //^^^ 
enum E { X($0Foo) } "#, ); } #[test] fn goto_definition_resolves_correct_name() { check( r#" //- /lib.rs use a::Foo; mod a; mod b; enum E { X(Foo$0) } //- /a.rs struct Foo; //^^^ //- /b.rs struct Foo; "#, ); } #[test] fn goto_def_for_module_declaration() { check( r#" //- /lib.rs mod $0foo; //- /foo.rs // empty //^ file "#, ); check( r#" //- /lib.rs mod $0foo; //- /foo/mod.rs // empty //^ file "#, ); } #[test] fn goto_def_for_macros() { check( r#" macro_rules! foo { () => { () } } //^^^ fn bar() { $0foo!(); } "#, ); } #[test] fn goto_def_for_macros_from_other_crates() { check( r#" //- /lib.rs crate:main deps:foo use foo::foo; fn bar() { $0foo!(); } //- /foo/lib.rs crate:foo #[macro_export] macro_rules! foo { () => { () } } //^^^ "#, ); } #[test] fn goto_def_for_macros_in_use_tree() { check( r#" //- /lib.rs crate:main deps:foo use foo::foo$0; //- /foo/lib.rs crate:foo #[macro_export] macro_rules! foo { () => { () } } //^^^ "#, ); } #[test] fn goto_def_for_macro_defined_fn_with_arg() { check( r#" //- /lib.rs macro_rules! define_fn { ($name:ident) => (fn $name() {}) } define_fn!(foo); //^^^ fn bar() { $0foo(); } "#, ); } #[test] fn goto_def_for_macro_defined_fn_no_arg() { check( r#" //- /lib.rs macro_rules! define_fn { () => (fn foo() {}) } define_fn!(); //^^^^^^^^^^^^^ fn bar() { $0foo(); } "#, ); } #[test] fn goto_definition_works_for_macro_inside_pattern() { check( r#" //- /lib.rs macro_rules! foo {() => {0}} //^^^ fn bar() { match (0,1) { ($0foo!(), _) => {} } } "#, ); } #[test] fn goto_definition_works_for_macro_inside_match_arm_lhs() { check( r#" //- /lib.rs macro_rules! foo {() => {0}} //^^^ fn bar() { match 0 { $0foo!() => {} } } "#, ); } #[test] fn goto_def_for_use_alias() { check( r#" //- /lib.rs crate:main deps:foo use foo as bar$0; //- /foo/lib.rs crate:foo // empty //^ file "#, ); } #[test] fn goto_def_for_use_alias_foo_macro() { check( r#" //- /lib.rs crate:main deps:foo use foo::foo as bar$0; //- /foo/lib.rs crate:foo #[macro_export] macro_rules! 
foo { () => { () } } //^^^ "#, ); } #[test] fn goto_def_for_methods() { check( r#" struct Foo; impl Foo { fn frobnicate(&self) { } //^^^^^^^^^^ } fn bar(foo: &Foo) { foo.frobnicate$0(); } "#, ); } #[test] fn goto_def_for_fields() { check( r#" struct Foo { spam: u32, } //^^^^ fn bar(foo: &Foo) { foo.spam$0; } "#, ); } #[test] fn goto_def_for_record_fields() { check( r#" //- /lib.rs struct Foo { spam: u32, } //^^^^ fn bar() -> Foo { Foo { spam$0: 0, } } "#, ); } #[test] fn goto_def_for_record_pat_fields() { check( r#" //- /lib.rs struct Foo { spam: u32, } //^^^^ fn bar(foo: Foo) -> Foo { let Foo { spam$0: _, } = foo } "#, ); } #[test] fn goto_def_for_record_fields_macros() { check( r" macro_rules! m { () => { 92 };} struct Foo { spam: u32 } //^^^^ fn bar() -> Foo { Foo { spam$0: m!() } } ", ); } #[test] fn goto_for_tuple_fields() { check( r#" struct Foo(u32); //^^^ fn bar() { let foo = Foo(0); foo.$00; } "#, ); } #[test] fn goto_def_for_ufcs_inherent_methods() { check( r#" struct Foo; impl Foo { fn frobnicate() { } } //^^^^^^^^^^ fn bar(foo: &Foo) { Foo::frobnicate$0(); } "#, ); } #[test] fn goto_def_for_ufcs_trait_methods_through_traits() { check( r#" trait Foo { fn frobnicate(); } //^^^^^^^^^^ fn bar() { Foo::frobnicate$0(); } "#, ); } #[test] fn goto_def_for_ufcs_trait_methods_through_self() { check( r#" struct Foo; trait Trait { fn frobnicate(); } //^^^^^^^^^^ impl Trait for Foo {} fn bar() { Foo::frobnicate$0(); } "#, ); } #[test] fn goto_definition_on_self() { check( r#" struct Foo; impl Foo { //^^^ pub fn new() -> Self { Self$0 {} } } "#, ); check( r#" struct Foo; impl Foo { //^^^ pub fn new() -> Self$0 { Self {} } } "#, ); check( r#" enum Foo { A } impl Foo { //^^^ pub fn new() -> Self$0 { Foo::A } } "#, ); check( r#" enum Foo { A } impl Foo { //^^^ pub fn thing(a: &Self$0) { } } "#, ); } #[test] fn goto_definition_on_self_in_trait_impl() { check( r#" struct Foo; trait Make { fn new() -> Self; } impl Make for Foo { //^^^ fn new() -> Self { Self$0 {} } } "#, 
); check( r#" struct Foo; trait Make { fn new() -> Self; } impl Make for Foo { //^^^ fn new() -> Self$0 { Self {} } } "#, ); } #[test] fn goto_def_when_used_on_definition_name_itself() { check( r#" struct Foo$0 { value: u32 } //^^^ "#, ); check( r#" struct Foo { field$0: string, } //^^^^^ "#, ); check( r#" fn foo_test$0() { } //^^^^^^^^ "#, ); check( r#" enum Foo$0 { Variant } //^^^ "#, ); check( r#" enum Foo { Variant1, Variant2$0, //^^^^^^^^ Variant3, } "#, ); check( r#" static INNER$0: &str = ""; //^^^^^ "#, ); check( r#" const INNER$0: &str = ""; //^^^^^ "#, ); check( r#" type Thing$0 = Option<()>; //^^^^^ "#, ); check( r#" trait Foo$0 { } //^^^ "#, ); check( r#" mod bar$0 { } //^^^ "#, ); } #[test] fn goto_from_macro() { check( r#" macro_rules! id { ($($tt:tt)*) => { $($tt)* } } fn foo() {} //^^^ id! { fn bar() { fo$0o(); } } mod confuse_index { fn foo(); } "#, ); } #[test] fn goto_through_format() { check( r#" #[macro_export] macro_rules! format { ($($arg:tt)*) => ($crate::fmt::format($crate::__export::format_args!($($arg)*))) } #[rustc_builtin_macro] #[macro_export] macro_rules! format_args { ($fmt:expr) => ({ /* compiler built-in */ }); ($fmt:expr, $($args:tt)*) => ({ /* compiler built-in */ }) } pub mod __export { pub use crate::format_args; fn foo() {} // for index confusion } fn foo() -> i8 {} //^^^ fn test() { format!("{}", fo$0o()) } "#, ); } #[test] fn goto_through_included_file() { check( r#" //- /main.rs #[rustc_builtin_macro] macro_rules! include {} include!("foo.rs"); //^^^^^^^^^^^^^^^^^^^ fn f() { foo$0(); } mod confuse_index { pub fn foo() {} } //- /foo.rs fn foo() {} "#, ); } #[test] fn goto_for_type_param() { check( r#" struct Foo<T: Clone> { t: $0T } //^ "#, ); } #[test] fn goto_within_macro() { check( r#" macro_rules! id { ($($tt:tt)*) => ($($tt)*) } fn foo() { let x = 1; //^ id!({ let y = $0x; let z = y; }); } "#, ); check( r#" macro_rules! 
id { ($($tt:tt)*) => ($($tt)*) } fn foo() { let x = 1; id!({ let y = x; //^ let z = $0y; }); } "#, ); } #[test] fn goto_def_in_local_fn() { check( r#" fn main() { fn foo() { let x = 92; //^ $0x; } } "#, ); } #[test] fn goto_def_in_local_macro() { check( r#" fn bar() { macro_rules! foo { () => { () } } //^^^ $0foo!(); } "#, ); } #[test] fn goto_def_for_field_init_shorthand() { check( r#" struct Foo { x: i32 } fn main() { let x = 92; //^ Foo { x$0 }; } "#, ) } #[test] fn goto_def_for_enum_variant_field() { check( r#" enum Foo { Bar { x: i32 } } //^ fn baz(foo: Foo) { match foo { Foo::Bar { x$0 } => x }; } "#, ); } #[test] fn goto_def_for_enum_variant_self_pattern_const() { check( r#" enum Foo { Bar } //^^^ impl Foo { fn baz(self) { match self { Self::Bar$0 => {} } } } "#, ); } #[test] fn goto_def_for_enum_variant_self_pattern_record() { check( r#" enum Foo { Bar { val: i32 } } //^^^ impl Foo { fn baz(self) -> i32 { match self { Self::Bar$0 { val } => {} } } } "#, ); } #[test] fn goto_def_for_enum_variant_self_expr_const() { check( r#" enum Foo { Bar } //^^^ impl Foo { fn baz(self) { Self::Bar$0; } } "#, ); } #[test] fn goto_def_for_enum_variant_self_expr_record() { check( r#" enum Foo { Bar { val: i32 } } //^^^ impl Foo { fn baz(self) { Self::Bar$0 {val: 4}; } } "#, ); } #[test] fn goto_def_for_type_alias_generic_parameter() { check( r#" type Alias<T> = T$0; //^ "#, ) } #[test] fn goto_def_for_macro_container() { check( r#" //- /lib.rs crate:main deps:foo foo::module$0::mac!(); //- /foo/lib.rs crate:foo pub mod module { //^^^^^^ #[macro_export] macro_rules! 
_mac { () => { () } } pub use crate::_mac as mac; } "#, ); } #[test] fn goto_def_for_assoc_ty_in_path() { check( r#" trait Iterator { type Item; //^^^^ } fn f() -> impl Iterator<Item$0 = u8> {} "#, ); } #[test] fn goto_def_for_assoc_ty_in_path_multiple() { check( r#" trait Iterator { type A; //^ type B; } fn f() -> impl Iterator<A$0 = u8, B = ()> {} "#, ); check( r#" trait Iterator { type A; type B; //^ } fn f() -> impl Iterator<A = u8, B$0 = ()> {} "#, ); } #[test] fn goto_def_for_assoc_ty_ufcs() { check( r#" trait Iterator { type Item; //^^^^ } fn g() -> <() as Iterator<Item$0 = ()>>::Item {} "#, ); } #[test] fn goto_def_for_assoc_ty_ufcs_multiple() { check( r#" trait Iterator { type A; //^ type B; } fn g() -> <() as Iterator<A$0 = (), B = u8>>::B {} "#, ); check( r#" trait Iterator { type A; type B; //^ } fn g() -> <() as Iterator<A = (), B$0 = u8>>::A {} "#, ); } #[test] fn goto_self_param_ty_specified() { check( r#" struct Foo {} impl Foo { fn bar(self: &Foo) { //^^^^ let foo = sel$0f; } }"#, ) } #[test] fn goto_self_param_on_decl() { check( r#" struct Foo {} impl Foo { fn bar(&self$0) { //^^^^ } }"#, ) } #[test] fn goto_lifetime_param_on_decl() { check( r#" fn foo<'foobar$0>(_: &'foobar ()) { //^^^^^^^ }"#, ) } #[test] fn goto_lifetime_param_decl() { check( r#" fn foo<'foobar>(_: &'foobar$0 ()) { //^^^^^^^ }"#, ) } #[test] fn goto_lifetime_param_decl_nested() { check( r#" fn foo<'foobar>(_: &'foobar ()) { fn foo<'foobar>(_: &'foobar$0 ()) {} //^^^^^^^ }"#, ) } #[test] #[ignore] // requires the HIR to somehow track these hrtb lifetimes fn goto_lifetime_hrtb() { check( r#"trait Foo<T> {} fn foo<T>() where for<'a> T: Foo<&'a$0 (u8, u16)>, {} //^^ "#, ); check( r#"trait Foo<T> {} fn foo<T>() where for<'a$0> T: Foo<&'a (u8, u16)>, {} //^^ "#, ); } #[test] #[ignore] // requires ForTypes to be implemented fn goto_lifetime_hrtb_for_type() { check( r#"trait Foo<T> {} fn foo<T>() where T: for<'a> Foo<&'a$0 (u8, u16)>, {} //^^ "#, ); } #[test] fn goto_label() { check( 
r#" fn foo<'foo>(_: &'foo ()) { 'foo: { //^^^^ 'bar: loop { break 'foo$0; } } }"#, ) } #[test] fn goto_def_for_intra_doc_link_same_file() { check( r#" /// Blah, [`bar`](bar) .. [`foo`](foo)$0 has [`bar`](bar) pub fn bar() { } /// You might want to see [`std::fs::read()`] too. pub fn foo() { } //^^^ }"#, ) } #[test] fn goto_def_for_intra_doc_link_inner() { check( r#" //- /main.rs mod m; struct S; //^ //- /m.rs //! [`super::S$0`] "#, ) } #[test] fn goto_incomplete_field() { check( r#" struct A { a: u32 } //^ fn foo() { A { a$0: }; } "#, ) } }
16.923469
101
0.458647
69c7de93bbab4677668f00b95b970b4c139df1ad
1,699
#![no_std] #![no_main] #![feature(type_alias_impl_trait)] #[path = "../example_common.rs"] mod example_common; use defmt::panic; use embassy::executor::Spawner; use embassy::io::{AsyncBufReadExt, AsyncWriteExt}; use embassy_nrf::buffered_uarte::State; use embassy_nrf::gpio::NoPin; use embassy_nrf::{buffered_uarte::BufferedUarte, interrupt, uarte, Peripherals}; use example_common::*; use futures::pin_mut; #[embassy::main] async fn main(_spawner: Spawner, p: Peripherals) { let mut config = uarte::Config::default(); config.parity = uarte::Parity::EXCLUDED; config.baudrate = uarte::Baudrate::BAUD115200; let mut tx_buffer = [0u8; 4096]; let mut rx_buffer = [0u8; 4096]; let irq = interrupt::take!(UARTE0_UART0); let mut state = State::new(); let u = unsafe { BufferedUarte::new( &mut state, p.UARTE0, p.TIMER0, p.PPI_CH0, p.PPI_CH1, irq, p.P0_08, p.P0_06, NoPin, NoPin, config, &mut rx_buffer, &mut tx_buffer, ) }; pin_mut!(u); info!("uarte initialized!"); unwrap!(u.write_all(b"Hello!\r\n").await); info!("wrote hello in uart!"); // Simple demo, reading 8-char chunks and echoing them back reversed. loop { info!("reading..."); let mut buf = [0u8; 8]; unwrap!(u.read_exact(&mut buf).await); info!("read done, got {}", buf); // Reverse buf for i in 0..4 { buf.swap(i, 7 - i); } info!("writing..."); unwrap!(u.write_all(&buf).await); info!("write done"); } }
24.623188
80
0.560918
e4ec6bbe968bc089c09fbd30effe239501f402de
14,559
// ====================================== // This file was automatically generated. // ====================================== use crate::config::{Client, Response}; use crate::ids::SkuId; use crate::params::{Deleted, Expand, Expandable, IdOrCreate, List, Metadata, Object, Timestamp}; use crate::resources::{CreateProduct, Currency, PackageDimensions, Product}; use serde_derive::{Deserialize, Serialize}; /// The resource representing a Stripe "SKU". /// /// For more details see [https://stripe.com/docs/api/skus/object](https://stripe.com/docs/api/skus/object). #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Sku { /// Unique identifier for the object. pub id: SkuId, /// Whether the SKU is available for purchase. #[serde(skip_serializing_if = "Option::is_none")] pub active: Option<bool>, /// A dictionary of attributes and values for the attributes defined by the product. /// /// If, for example, a product's attributes are `["size", "gender"]`, a valid SKU has the following dictionary of attributes: `{"size": "Medium", "gender": "Unisex"}`. #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<Metadata>, /// Time at which the object was created. /// /// Measured in seconds since the Unix epoch. #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<Timestamp>, /// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. /// /// Must be a [supported currency](https://stripe.com/docs/currencies). #[serde(skip_serializing_if = "Option::is_none")] pub currency: Option<Currency>, // Always true for a deleted object #[serde(default)] pub deleted: bool, /// The URL of an image for this SKU, meant to be displayable to the customer. 
#[serde(skip_serializing_if = "Option::is_none")] pub image: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub inventory: Option<Inventory>, /// Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode. #[serde(skip_serializing_if = "Option::is_none")] pub livemode: Option<bool>, /// Set of key-value pairs that you can attach to an object. /// /// This can be useful for storing additional information about the object in a structured format. #[serde(default)] pub metadata: Metadata, /// The dimensions of this SKU for shipping purposes. #[serde(skip_serializing_if = "Option::is_none")] pub package_dimensions: Option<PackageDimensions>, /// The cost of the item as a positive integer in the smallest currency unit (that is, 100 cents to charge $1.00, or 100 to charge ¥100, Japanese Yen being a zero-decimal currency). #[serde(skip_serializing_if = "Option::is_none")] pub price: Option<i64>, /// The ID of the product this SKU is associated with. /// /// The product must be currently active. #[serde(skip_serializing_if = "Option::is_none")] pub product: Option<Expandable<Product>>, #[serde(skip_serializing_if = "Option::is_none")] pub updated: Option<Timestamp>, } impl Sku { /// Returns a list of your SKUs. /// /// The SKUs are returned sorted by creation date, with the most recently created SKUs appearing first. pub fn list(client: &Client, params: ListSkus<'_>) -> Response<List<Sku>> { client.get_query("/skus", &params) } /// Creates a new SKU associated with a product. pub fn create(client: &Client, params: CreateSku<'_>) -> Response<Sku> { client.post_form("/skus", &params) } /// Retrieves the details of an existing SKU. /// /// Supply the unique SKU identifier from either a SKU creation request or from the product, and Stripe will return the corresponding SKU information. 
pub fn retrieve(client: &Client, id: &SkuId, expand: &[&str]) -> Response<Sku> { client.get_query(&format!("/skus/{}", id), &Expand { expand }) } /// Updates the specific SKU by setting the values of the parameters passed. /// /// Any parameters not provided will be left unchanged. Note that a SKU’s `attributes` are not editable. /// Instead, you would need to deactivate the existing SKU and create a new one with the new attribute values. pub fn update(client: &Client, id: &SkuId, params: UpdateSku<'_>) -> Response<Sku> { client.post_form(&format!("/skus/{}", id), &params) } /// Delete a SKU. /// /// Deleting a SKU is only possible until it has been used in an order. pub fn delete(client: &Client, id: &SkuId) -> Response<Deleted<SkuId>> { client.delete(&format!("/skus/{}", id)) } } impl Object for Sku { type Id = SkuId; fn id(&self) -> Self::Id { self.id.clone() } fn object(&self) -> &'static str { "sku" } } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Inventory { /// The count of inventory available. /// /// Will be present if and only if `type` is `finite`. #[serde(skip_serializing_if = "Option::is_none")] pub quantity: Option<u64>, /// Inventory type. /// /// Possible values are `finite`, `bucket` (not quantified), and `infinite`. #[serde(rename = "type")] pub type_: String, /// An indicator of the inventory available. /// /// Possible values are `in_stock`, `limited`, and `out_of_stock`. /// Will be present if and only if `type` is `bucket`. #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<String>, } /// The parameters for `Sku::create`. #[derive(Clone, Debug, Serialize)] pub struct CreateSku<'a> { /// Whether the SKU is available for purchase. /// /// Default to `true`. #[serde(skip_serializing_if = "Option::is_none")] pub active: Option<bool>, /// A dictionary of attributes and values for the attributes defined by the product. 
/// /// If, for example, a product's attributes are `["size", "gender"]`, a valid SKU has the following dictionary of attributes: `{"size": "Medium", "gender": "Unisex"}`. #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<Metadata>, /// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. /// /// Must be a [supported currency](https://stripe.com/docs/currencies). pub currency: Currency, /// Specifies which fields in the response should be expanded. #[serde(skip_serializing_if = "Expand::is_empty")] pub expand: &'a [&'a str], /// The identifier for the SKU. /// /// Must be unique. /// If not provided, an identifier will be randomly generated. #[serde(skip_serializing_if = "Option::is_none")] pub id: Option<&'a str>, /// The URL of an image for this SKU, meant to be displayable to the customer. #[serde(skip_serializing_if = "Option::is_none")] pub image: Option<&'a str>, /// Description of the SKU's inventory. #[serde(skip_serializing_if = "Option::is_none")] pub inventory: Option<Inventory>, /// A set of key-value pairs that you can attach to a SKU object. /// /// It can be useful for storing additional information about the SKU in a structured format. #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<Metadata>, /// The dimensions of this SKU for shipping purposes. #[serde(skip_serializing_if = "Option::is_none")] pub package_dimensions: Option<PackageDimensions>, /// The cost of the item as a nonnegative integer in the smallest currency unit (that is, 100 cents to charge $1.00, or 100 to charge ¥100, Japanese Yen being a zero-decimal currency). pub price: i64, /// The ID of the product this SKU is associated with. /// /// Must be a product with type `good`. 
pub product: IdOrCreate<'a, CreateProduct<'a>>, } impl<'a> CreateSku<'a> { pub fn new( currency: Currency, inventory: Option<Inventory>, price: i64, product: IdOrCreate<'a, CreateProduct<'a>>, ) -> Self { CreateSku { active: Default::default(), attributes: Default::default(), currency, expand: Default::default(), id: Default::default(), image: Default::default(), inventory, metadata: Default::default(), package_dimensions: Default::default(), price, product, } } } /// The parameters for `Sku::list`. #[derive(Clone, Debug, Serialize, Default)] pub struct ListSkus<'a> { /// Only return SKUs that are active or inactive (e.g., pass `false` to list all inactive products). #[serde(skip_serializing_if = "Option::is_none")] pub active: Option<bool>, /// Only return SKUs that have the specified key-value pairs in this partially constructed dictionary. /// /// Can be specified only if `product` is also supplied. /// For instance, if the associated product has attributes `["color", "size"]`, passing in `attributes[color]=red` returns all the SKUs for this product that have `color` set to `red`. #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<Metadata>, /// A cursor for use in pagination. /// /// `ending_before` is an object ID that defines your place in the list. /// For instance, if you make a list request and receive 100 objects, starting with `obj_bar`, your subsequent call can include `ending_before=obj_bar` in order to fetch the previous page of the list. #[serde(skip_serializing_if = "Option::is_none")] pub ending_before: Option<SkuId>, /// Specifies which fields in the response should be expanded. #[serde(skip_serializing_if = "Expand::is_empty")] pub expand: &'a [&'a str], /// Only return SKUs with the given IDs. #[serde(skip_serializing_if = "Option::is_none")] pub ids: Option<Vec<String>>, /// Only return SKUs that are either in stock or out of stock (e.g., pass `false` to list all SKUs that are out of stock). 
/// /// If no value is provided, all SKUs are returned. #[serde(skip_serializing_if = "Option::is_none")] pub in_stock: Option<bool>, /// A limit on the number of objects to be returned. /// /// Limit can range between 1 and 100, and the default is 10. #[serde(skip_serializing_if = "Option::is_none")] pub limit: Option<u64>, /// The ID of the product whose SKUs will be retrieved. /// /// Must be a product with type `good`. #[serde(skip_serializing_if = "Option::is_none")] pub product: Option<IdOrCreate<'a, CreateProduct<'a>>>, /// A cursor for use in pagination. /// /// `starting_after` is an object ID that defines your place in the list. /// For instance, if you make a list request and receive 100 objects, ending with `obj_foo`, your subsequent call can include `starting_after=obj_foo` in order to fetch the next page of the list. #[serde(skip_serializing_if = "Option::is_none")] pub starting_after: Option<SkuId>, } impl<'a> ListSkus<'a> { pub fn new() -> Self { ListSkus { active: Default::default(), attributes: Default::default(), ending_before: Default::default(), expand: Default::default(), ids: Default::default(), in_stock: Default::default(), limit: Default::default(), product: Default::default(), starting_after: Default::default(), } } } /// The parameters for `Sku::update`. #[derive(Clone, Debug, Serialize, Default)] pub struct UpdateSku<'a> { /// Whether this SKU is available for purchase. #[serde(skip_serializing_if = "Option::is_none")] pub active: Option<bool>, /// A dictionary of attributes and values for the attributes defined by the product. /// /// When specified, `attributes` will partially update the existing attributes dictionary on the product, with the postcondition that a value must be present for each attribute key on the product. #[serde(skip_serializing_if = "Option::is_none")] pub attributes: Option<Metadata>, /// Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. 
/// /// Must be a [supported currency](https://stripe.com/docs/currencies). #[serde(skip_serializing_if = "Option::is_none")] pub currency: Option<Currency>, /// Specifies which fields in the response should be expanded. #[serde(skip_serializing_if = "Expand::is_empty")] pub expand: &'a [&'a str], /// The URL of an image for this SKU, meant to be displayable to the customer. #[serde(skip_serializing_if = "Option::is_none")] pub image: Option<&'a str>, /// Description of the SKU's inventory. #[serde(skip_serializing_if = "Option::is_none")] pub inventory: Option<Inventory>, /// A set of key-value pairs that you can attach to a SKU object. /// /// It can be useful for storing additional information about the SKU in a structured format. #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<Metadata>, /// The dimensions of this SKU for shipping purposes. #[serde(skip_serializing_if = "Option::is_none")] pub package_dimensions: Option<PackageDimensions>, /// The cost of the item as a positive integer in the smallest currency unit (that is, 100 cents to charge $1.00, or 100 to charge ¥100, Japanese Yen being a zero-decimal currency). #[serde(skip_serializing_if = "Option::is_none")] pub price: Option<i64>, /// The ID of the product that this SKU should belong to. /// /// The product must exist, have the same set of attribute names as the SKU's current product, and be of type `good`. #[serde(skip_serializing_if = "Option::is_none")] pub product: Option<IdOrCreate<'a, CreateProduct<'a>>>, } impl<'a> UpdateSku<'a> { pub fn new() -> Self { UpdateSku { active: Default::default(), attributes: Default::default(), currency: Default::default(), expand: Default::default(), image: Default::default(), inventory: Default::default(), metadata: Default::default(), package_dimensions: Default::default(), price: Default::default(), product: Default::default(), } } }
39.5625
204
0.647778
69afa8253deed07fec9ab0b708b04854ff2e38f0
1,421
#![allow(unused_must_use)] #![allow(unused_variables)] #![allow(dead_code)] use criterion::*; use uri_rs::parser; use uri_rs::parser::parsers::Elms; criterion_group!(benches, criterion_benchmark); const uri: &'static str = "http://user1:pass1@localhost:8080/example?key1=value1&key2=value2&key1=value2#f1"; #[inline] fn uri_parsers_uri() { let _ = parser::parsers::uri_parsers::uri(Elms::new(uri.as_bytes())).unwrap(); } const hier_part: &'static str = "//user1:pass1@localhost:80801"; #[inline] fn uri_hier_part_parsers_hier_part() { let _ = parser::parsers::hier_part_parsers::hier_part(Elms::new(hier_part.as_bytes())).unwrap(); } const query: &'static str = "key1=value1&key2=value2&key1=value2"; fn uri_query_parsers_query() { let _ = parser::parsers::query_parsers::query(Elms::new(query.as_bytes())).unwrap(); } fn criterion_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("uri"); let op = 0u8; group.bench_with_input( BenchmarkId::new("j5ik2o/uri_parsers_uri", op), &op, |b, i| b.iter(|| uri_parsers_uri()), ); group.bench_with_input( BenchmarkId::new("j5ik2o/uri_hier_part_parsers_hier_part", op), &op, |b, i| b.iter(|| uri_hier_part_parsers_hier_part()), ); group.bench_with_input( BenchmarkId::new("j5ik2o/uri_query_parsers_query", op), &op, |b, i| b.iter(|| uri_query_parsers_query()), ); } criterion_main! { benches, }
25.836364
98
0.697396
90bdaac5de70f1b8ed81c85a919eed4d5e2fba7d
649
use actix_web::{web, App, HttpServer, Responder}; use serde::Serialize; // <flexible-responders> #[derive(Serialize)] struct Measurement { temperature: f32, } async fn hello_world() -> impl Responder { "Hello World!" } async fn current_temperature() -> impl Responder { web::Json(Measurement { temperature: 42.3 }) } // </flexible-responders> #[actix_web::main] async fn main() -> std::io::Result<()> { HttpServer::new(|| { App::new() .service(web::resource("/").to(hello_world)) .service(web::resource("/temp").to(current_temperature)) }) .bind("127.0.0.1:8080")? .run() .await }
21.633333
68
0.613251
e4fb6e287d3fbbe10777a3fe24008c9bea96b13a
2,597
fn main() { #[cfg(not(feature = "colors"))] println!("Feature color is switched off"); #[cfg(feature = "colors")] { use ansi_term::Color; use atty::Stream::{Stderr, Stdout}; for i in 0..=255 { println!("{}: {}", i, Color::Fixed(i).paint(i.to_string())); } println!(); if atty::is(Stdout) { println!( "Stdout is considered a tty - \ flexi_logger::AdaptiveFormat will use colors", ); } else { println!( "Stdout is not considered a tty - \ flexi_logger::AdaptiveFormat will NOT use colors" ); } if atty::is(Stderr) { println!( "Stderr is considered a tty - \ flexi_logger::AdaptiveFormat will use colors", ); } else { println!( "Stderr is not considered a tty - \ flexi_logger::AdaptiveFormat will NOT use colors!" ); } #[cfg(target_os = "windows")] if ansi_term::enable_ansi_support().is_err() { println!("Unsupported windows console detected, coloring will likely not work"); } println!( "\n{}", Color::Fixed(196) .bold() .paint("err! output (red) with default palette") ); println!( "{}", Color::Fixed(208) .bold() .paint("warn! output (yellow) with default palette") ); println!("info! output (normal) with default palette"); println!( "{}", Color::Fixed(7).paint("debug! output (normal) with default palette") ); println!( "{}", Color::Fixed(8).paint("trace! output (grey) with default palette") ); println!( "\n{}", Color::Red .bold() .paint("err! output (red) with env_logger-palette") ); println!( "{}", Color::Yellow.paint("warn! output (yellow) with env_logger-palette") ); println!( "{}", Color::Green.paint("info! output (green) with env_logger-palette") ); println!( "{}", Color::Blue.paint("debug! output (blue) with env_logger-palette") ); println!( "{}", Color::Cyan.paint("trace! output (cyan) with env_logger-palette") ); } }
28.538462
92
0.45206
f7e7a7a39da11f78800d9ab686d60b5c390bc2b3
1,528
use libc::c_char; use std::ffi::{CStr, CString, OsStr}; use std::os::unix::ffi::OsStrExt; use std::path::Path; use ffi_support::{call_with_result, ExternError}; use playground_utils::{do_compile_task, list_toolchains, Task}; #[no_mangle] pub extern "C" fn playgroundGetToolchains(err: &mut ExternError) -> *const c_char { call_with_result(err, || { list_toolchains() .map(|r| serde_json::to_string(&r).unwrap()) }) } #[no_mangle] pub extern "C" fn playgroundExecuteTask( path: *const c_char, cmd_json: *const c_char, std_err_callback: extern "C" fn(*const c_char), err: &mut ExternError, ) -> *const c_char { call_with_result(err, || { eprintln!("playground execute task"); let path = unsafe { CStr::from_ptr(path) }; let json = unsafe { CStr::from_ptr(cmd_json) }; let path = Path::new(OsStr::from_bytes(path.to_bytes())); let json = json.to_str().expect("json must be valid utf8"); let task: Task = serde_json::from_str(json).expect("malformed task json"); do_compile_task(path, task, |stderr| { let cstring = CString::new(stderr) .unwrap_or_else(|_| CString::new("null byte in stderr").unwrap()); std_err_callback(cstring.as_ptr()); }) .map(|r| serde_json::to_string(&r).unwrap()) }) } #[no_mangle] pub extern "C" fn playgroundStringFree(ptr: *mut c_char) { if ptr.is_null() { return; } unsafe { CString::from_raw(ptr); } }
30.56
83
0.621728
914255d66956bc36cf95ce34717cb22f1da5d2af
1,618
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0 */ // This code was copied and then modified from Tokio's Axum. /* Copyright (c) 2022 Tower Contributors * * Permission is hereby granted, free of charge, to any * person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the * Software without restriction, including without * limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software * is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice * shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR * IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ use crate::body::BoxBody; #[doc(hidden)] pub type Response<T = BoxBody> = http::Response<T>; /// Trait for generating responses. /// /// Types that implement `IntoResponse` can be returned from handlers. pub trait IntoResponse { /// Create a response. fn into_response(self) -> Response; }
34.425532
70
0.73733
0ed77b12f6db0b351c6299fabea551d8e7eb3ab0
3,759
use std::sync::Arc; use crate::gameboy::memory::regions::*; use crate::gameboy::memory::GameboyCart; use crate::gameboy::memory::cart::CartHeader; pub struct MBC5 { header: Arc<CartHeader>, rom_banks: Vec<Vec<u8>>, ram_banks: Vec<Vec<u8>>, romb0: u8, romb1: u8, ramb: u8, ram_enabled: bool } impl MBC5 { pub fn new(header: Arc<CartHeader>, data: Vec<u8>) -> MBC5 { let rom_banks = { let mut result = Vec::new(); let chunks = data.chunks(16384); for chunk in chunks { result.push(chunk.to_vec()); } result }; let ram_banks = { if let Ok(data) = std::fs::read(format!("ram/{}.bin", header.title())) { let mut result = Vec::with_capacity(8192 * header.ram_banks_count()); for chunk in data.chunks_exact(8192) { result.push(chunk.to_vec()); } result } else { vec![vec![0; 8192]; header.ram_banks_count] } }; MBC5 { header, rom_banks, ram_banks, romb0: 0, romb1: 0, ramb: 0, ram_enabled: false } } fn save_ram(&self) { let mut data = Vec::with_capacity(8192 * self.ram_banks.len()); for bank in self.ram_banks.iter() { for byte in bank { data.push(*byte); } } if let Err(error) = std::fs::create_dir("ram") { if error.kind() != std::io::ErrorKind::AlreadyExists { println!("Error creating RAM directory: {}", error.to_string()); } } if let Err(error) = std::fs::write(format!("ram/{}.bin", self.header.title()), data) { println!("Error saving ram contents: {}", error.to_string()); } } fn get_rom_bank(&self) -> usize { (((self.romb1 as u16) << 9) | self.romb0 as u16) as usize } } impl GameboyCart for MBC5 { fn read(&self, address: u16) -> u8 { if CARTRIDGE_ROM_BANK0.contains(&address) { self.rom_banks[0][address as usize] } else if CARTRIDGE_ROM_BANKX.contains(&address) { let address = (address - 0x4000) as usize; self.rom_banks[self.get_selected_rom_bank()][address] } else if CARTRIDGE_RAM.contains(&address) { let address = (address - 0xA000) as usize; self.ram_banks[self.get_selected_ram_bank()][address] } else { unreachable!() } } fn write(&mut self, address: u16, value: u8) { if 
MBC5_RAMG.contains(&address) { self.ram_enabled = value == 0b00001010; if !self.ram_enabled { self.save_ram(); } } else if MBC5_ROMB0.contains(&address) { self.romb0 = value; } else if MBC5_ROMB1.contains(&address) { self.romb1 = value & 1; } else if MBC5_RAMB.contains(&address) { self.ramb = value & 0b00001111; } } // TODO: Get this to work properly with banking. fn dbg_write(&mut self, address: u16, value: u8) { } fn reset(&mut self) { self.romb0 = 0; self.romb1 = 0; self.ram_enabled = false; } fn get_header(&self) -> Arc<CartHeader> { self.header.clone() } fn is_ram_enabled(&self) -> bool { self.ram_enabled } fn get_selected_rom_bank(&self) -> usize { self.get_rom_bank() } fn get_selected_ram_bank(&self) -> usize { self.ramb as usize } }
24.89404
94
0.507582
b9f31cdf8be75e032cd31b39cfaeb20c8caab78e
5,436
use { crate::{ ast::{ColumnDef, ColumnOption}, data::{Interval, Row, Value}, result::Result, store::Store, }, chrono::{NaiveDate, NaiveDateTime, NaiveTime}, im_rc::HashSet, rust_decimal::Decimal, serde::Serialize, std::{fmt::Debug, rc::Rc}, thiserror::Error as ThisError, utils::Vector, }; #[derive(ThisError, Debug, PartialEq, Serialize)] pub enum ValidateError { #[error("conflict! storage row has no column on index {0}")] ConflictOnStorageColumnIndex(usize), #[error("duplicate entry '{0:?}' for unique column '{1}'")] DuplicateEntryOnUniqueField(Value, String), } pub enum ColumnValidation { All(Rc<[ColumnDef]>), SpecifiedColumns(Rc<[ColumnDef]>, Vec<String>), } #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub enum UniqueKey { Bool(bool), I8(i8), I64(i64), Str(String), Date(NaiveDate), Timestamp(NaiveDateTime), Time(NaiveTime), Interval(Interval), Uuid(u128), Decimal(Decimal), } #[derive(Debug)] struct UniqueConstraint { column_index: usize, column_name: String, keys: HashSet<UniqueKey>, } impl UniqueConstraint { fn new(column_index: usize, column_name: String) -> Self { Self { column_index, column_name, keys: HashSet::new(), } } fn add(self, value: &Value) -> Result<Self> { let new_key = match self.check(value)? { Some(new_key) => new_key, None => { return Ok(self); } }; let keys = self.keys.update(new_key); Ok(Self { column_index: self.column_index, column_name: self.column_name, keys, }) } fn check(&self, value: &Value) -> Result<Option<UniqueKey>> { match value.try_into()? 
{ Some(new_key) if self.keys.contains(&new_key) => { Err(ValidateError::DuplicateEntryOnUniqueField( value.clone(), self.column_name.to_owned(), ) .into()) } new_key => Ok(new_key), } } } pub async fn validate_unique<T: Debug>( storage: &impl Store<T>, table_name: &str, column_validation: ColumnValidation, row_iter: impl Iterator<Item = &Row> + Clone, ) -> Result<()> { let columns = match column_validation { ColumnValidation::All(column_defs) => fetch_all_unique_columns(&column_defs), ColumnValidation::SpecifiedColumns(column_defs, specified_columns) => { fetch_specified_unique_columns(&column_defs, &specified_columns) } }; let unique_constraints: Vec<_> = create_unique_constraints(columns, row_iter)?.into(); if unique_constraints.is_empty() { return Ok(()); } let unique_constraints = Rc::new(unique_constraints); storage.scan_data(table_name).await?.try_for_each(|result| { let (_, row) = result?; Rc::clone(&unique_constraints) .iter() .try_for_each(|constraint| { let col_idx = constraint.column_index; let val = row .get_value(col_idx) .ok_or(ValidateError::ConflictOnStorageColumnIndex(col_idx))?; constraint.check(val)?; Ok(()) }) }) } fn create_unique_constraints<'a>( unique_columns: Vec<(usize, String)>, row_iter: impl Iterator<Item = &'a Row> + Clone, ) -> Result<Vector<UniqueConstraint>> { unique_columns .into_iter() .try_fold(Vector::new(), |constraints, col| { let (col_idx, col_name) = col; let new_constraint = UniqueConstraint::new(col_idx, col_name); let new_constraint = row_iter .clone() .try_fold(new_constraint, |constraint, row| { let val = row .get_value(col_idx) .ok_or(ValidateError::ConflictOnStorageColumnIndex(col_idx))?; constraint.add(val) })?; Ok(constraints.push(new_constraint)) }) } fn fetch_all_unique_columns(column_defs: &[ColumnDef]) -> Vec<(usize, String)> { column_defs .iter() .enumerate() .filter_map(|(i, table_col)| { if table_col .options .iter() .any(|opt_def| matches!(opt_def.option, ColumnOption::Unique { .. 
})) { Some((i, table_col.name.to_owned())) } else { None } }) .collect() } fn fetch_specified_unique_columns( all_column_defs: &[ColumnDef], specified_columns: &[String], ) -> Vec<(usize, String)> { all_column_defs .iter() .enumerate() .filter_map(|(i, table_col)| { if table_col .options .iter() .any(|opt_def| match opt_def.option { ColumnOption::Unique { .. } => specified_columns .iter() .any(|specified_col| specified_col == &table_col.name), _ => false, }) { Some((i, table_col.name.to_owned())) } else { None } }) .collect() }
28.3125
90
0.53716
e27c80d794ffbb628928aee9433b95b87d818cdd
4,333
use crate::{ chain_spec, cli::{Cli, Subcommand}, service, }; use contracts_node_runtime::Block; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; impl SubstrateCli for Cli { fn impl_name() -> String { "Substrate Contracts Node".into() } fn impl_version() -> String { env!("SUBSTRATE_CLI_IMPL_VERSION").into() } fn description() -> String { env!("CARGO_PKG_DESCRIPTION").into() } fn author() -> String { env!("CARGO_PKG_AUTHORS").into() } fn support_url() -> String { "https://github.com/paritytech/canvas-node/issues/new".into() } fn copyright_start_year() -> i32 { 2021 } fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion { &contracts_node_runtime::VERSION } } /// Parse and run command line arguments pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. } = service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, .. 
} = service::new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. } = service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.database)) }, Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?; Ok((cmd.run(client, backend, None), task_manager)) }) }, Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::<Block, service::ExecutorDispatch>(config)) } else { Err("Benchmarking wasn't enabled when building the node. You can enable it with \ `--features runtime-benchmarks`." .into()) }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { // we don't need any of the components of new_partial, just a runtime, or a task // manager to do `async_run`. 
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); let task_manager = sc_service::TaskManager::new(config.tokio_handle.clone(), registry) .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; Ok((cmd.run::<Block, service::ExecutorDispatch>(config), task_manager)) }) }, #[cfg(not(feature = "try-runtime"))] Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ You can enable it with `--features try-runtime`." .into()), None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { service::new_full(config).map_err(sc_cli::Error::Service) }) }, } }
31.860294
90
0.664436
7a12db8fa06d00b0332d3aba5d99c859c434fc7a
765
use cis_client::settings::CisSettings; use config::{Config, ConfigError, Environment, File}; use serde::Deserialize; use std::env; #[derive(Debug, Deserialize, Clone)] pub struct AvatarSettings { pub s3_bucket: String, pub retrieve_by_id_path: String, pub picture_api_url: String, } #[derive(Debug, Deserialize)] pub struct Settings { pub auth: String, pub cis: CisSettings, pub avatar: AvatarSettings, } impl Settings { pub fn new() -> Result<Self, ConfigError> { let file = env::var("DPF_SETTINGS").unwrap_or_else(|_| String::from(".settings.json")); let mut s = Config::new(); s.merge(File::with_name(&file))?; s.merge(Environment::new().separator("__").prefix("dp"))?; s.try_into() } }
26.37931
95
0.657516
d744193247c49b59d36176873dbf9452012d1de0
11,934
//! Support for a calling of an imported function. extern crate alloc; use pyo3::prelude::*; use pyo3::types::{PyAny, PyDict, PyTuple}; use crate::code_memory::CodeMemory; use crate::function::Function; use crate::memory::Memory; use crate::value::{read_value_from, write_value_to}; use cranelift_codegen::ir::types; use cranelift_codegen::ir::{InstBuilder, StackSlotData, StackSlotKind}; use cranelift_codegen::Context; use cranelift_codegen::{binemit, ir, isa}; use cranelift_entity::{EntityRef, PrimaryMap}; use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext}; use cranelift_wasm::{DefinedFuncIndex, FuncIndex}; use target_lexicon::HOST; use wasmtime_environ::{Export, Module}; use wasmtime_runtime::{Imports, InstanceHandle, VMContext, VMFunctionBody}; use alloc::rc::Rc; use core::cell::RefCell; use core::cmp; use std::collections::{HashMap, HashSet}; struct BoundPyFunction { name: String, obj: PyObject, } struct ImportObjState { calls: Vec<BoundPyFunction>, #[allow(dead_code)] code_memory: CodeMemory, } unsafe extern "C" fn stub_fn(vmctx: *mut VMContext, call_id: u32, values_vec: *mut i64) { let gil = Python::acquire_gil(); let py = gil.python(); let mut instance = InstanceHandle::from_vmctx(vmctx); let (_name, obj) = { let state = instance .host_state() .downcast_mut::<ImportObjState>() .expect("state"); let name = state.calls[call_id as usize].name.to_owned(); let obj = state.calls[call_id as usize].obj.clone_ref(py); (name, obj) }; let module = instance.module_ref(); let signature = &module.signatures[module.functions[FuncIndex::new(call_id as usize)]]; let mut args = Vec::new(); for i in 1..signature.params.len() { args.push(read_value_from( py, values_vec.offset(i as isize - 1), signature.params[i].value_type, )) } let result = obj.call(py, PyTuple::new(py, args), None).expect("result"); for i in 0..signature.returns.len() { let val = if result.is_none() { 0.into_py(py) // FIXME default ??? 
} else { if i > 0 { panic!("multiple returns unsupported"); } result.clone_ref(py) }; write_value_to( py, values_vec.offset(i as isize), signature.returns[i].value_type, val, ); } } /// Create a trampoline for invoking a python function. fn make_trampoline( isa: &dyn isa::TargetIsa, code_memory: &mut CodeMemory, fn_builder_ctx: &mut FunctionBuilderContext, call_id: u32, signature: &ir::Signature, ) -> *const VMFunctionBody { // Mostly reverse copy of the similar method from wasmtime's // wasmtime-jit/src/compiler.rs. let pointer_type = isa.pointer_type(); let mut stub_sig = ir::Signature::new(isa.frontend_config().default_call_conv); // Add the `vmctx` parameter. stub_sig.params.push(ir::AbiParam::special( pointer_type, ir::ArgumentPurpose::VMContext, )); // Add the `call_id` parameter. stub_sig.params.push(ir::AbiParam::new(types::I32)); // Add the `values_vec` parameter. stub_sig.params.push(ir::AbiParam::new(pointer_type)); let values_vec_len = 8 * cmp::max(signature.params.len() - 1, signature.returns.len()) as u32; let mut context = Context::new(); context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), signature.clone()); let ss = context.func.create_stack_slot(StackSlotData::new( StackSlotKind::ExplicitSlot, values_vec_len, )); let value_size = 8; { let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx); let block0 = builder.create_ebb(); builder.append_ebb_params_for_function_params(block0); builder.switch_to_block(block0); builder.seal_block(block0); let values_vec_ptr_val = builder.ins().stack_addr(pointer_type, ss, 0); let mflags = ir::MemFlags::trusted(); for i in 1..signature.params.len() { if i == 0 { continue; } let val = builder.func.dfg.ebb_params(block0)[i]; builder.ins().store( mflags, val, values_vec_ptr_val, ((i - 1) * value_size) as i32, ); } let vmctx_ptr_val = builder.func.dfg.ebb_params(block0)[0]; let call_id_val = builder.ins().iconst(types::I32, call_id as i64); let callee_args = 
vec![vmctx_ptr_val, call_id_val, values_vec_ptr_val]; let new_sig = builder.import_signature(stub_sig.clone()); let callee_value = builder .ins() .iconst(pointer_type, stub_fn as *const VMFunctionBody as i64); builder .ins() .call_indirect(new_sig, callee_value, &callee_args); let mflags = ir::MemFlags::trusted(); let mut results = Vec::new(); for (i, r) in signature.returns.iter().enumerate() { let load = builder.ins().load( r.value_type, mflags, values_vec_ptr_val, (i * value_size) as i32, ); results.push(load); } builder.ins().return_(&results); builder.finalize() } let mut code_buf: Vec<u8> = Vec::new(); let mut reloc_sink = RelocSink {}; let mut trap_sink = binemit::NullTrapSink {}; let mut stackmap_sink = binemit::NullStackmapSink {}; context .compile_and_emit( isa, &mut code_buf, &mut reloc_sink, &mut trap_sink, &mut stackmap_sink, ) .expect("compile_and_emit"); code_memory .allocate_copy_of_byte_slice(&code_buf) .expect("allocate_copy_of_byte_slice") .as_ptr() } fn parse_annotation_type(s: &str) -> ir::Type { match s { "I32" | "i32" => types::I32, "I64" | "i64" => types::I64, "F32" | "f32" => types::F32, "F64" | "f64" => types::F64, _ => panic!("unknown type in annotations"), } } fn get_signature_from_py_annotation( annot: &PyDict, pointer_type: ir::Type, call_conv: isa::CallConv, ) -> PyResult<ir::Signature> { let mut params = Vec::new(); params.push(ir::AbiParam::special( pointer_type, ir::ArgumentPurpose::VMContext, )); let mut returns = None; for (name, value) in annot.iter() { let ty = parse_annotation_type(&value.to_string()); match name.to_string().as_str() { "return" => returns = Some(ty), _ => params.push(ir::AbiParam::new(ty)), } } Ok(ir::Signature { params, returns: match returns { Some(r) => vec![ir::AbiParam::new(r)], None => vec![], }, call_conv, }) } pub fn into_instance_from_obj( py: Python, global_exports: Rc<RefCell<HashMap<String, Option<wasmtime_runtime::Export>>>>, obj: &PyAny, ) -> PyResult<InstanceHandle> { let isa = { let isa_builder 
= cranelift_native::builder().expect("host machine is not a supported target"); let flag_builder = cranelift_codegen::settings::builder(); isa_builder.finish(cranelift_codegen::settings::Flags::new(flag_builder)) }; let mut fn_builder_ctx = FunctionBuilderContext::new(); let mut module = Module::new(); let mut finished_functions: PrimaryMap<DefinedFuncIndex, *const VMFunctionBody> = PrimaryMap::new(); let mut code_memory = CodeMemory::new(); let pointer_type = types::Type::triple_pointer_type(&HOST); let call_conv = isa::CallConv::triple_default(&HOST); let obj = obj.cast_as::<PyDict>()?; let mut bound_functions = Vec::new(); let mut dependencies = HashSet::new(); let mut memories = PrimaryMap::new(); for (name, item) in obj.iter() { if item.is_callable() { let sig = if item.get_type().is_subclass::<Function>()? { // TODO faster calls? let wasm_fn = item.cast_as::<Function>()?; dependencies.insert(wasm_fn.instance.clone()); wasm_fn.get_signature() } else if item.hasattr("__annotations__")? { let annot = item.getattr("__annotations__")?.cast_as::<PyDict>()?; get_signature_from_py_annotation(&annot, pointer_type, call_conv)? } else { // TODO support calls without annotations? continue; }; let sig_id = module.signatures.push(sig.clone()); let func_id = module.functions.push(sig_id); module .exports .insert(name.to_string(), Export::Function(func_id)); let trampoline = make_trampoline( isa.as_ref(), &mut code_memory, &mut fn_builder_ctx, func_id.index() as u32, &sig, ); finished_functions.push(trampoline); bound_functions.push(BoundPyFunction { name: name.to_string(), obj: item.into_py(py), }); } else if item.get_type().is_subclass::<Memory>()? 
{ let wasm_mem = item.cast_as::<Memory>()?; dependencies.insert(wasm_mem.instance.clone()); let plan = wasm_mem.get_plan(); let mem_id = module.memory_plans.push(plan); let _mem_id_2 = memories.push(wasm_mem.into_import()); assert_eq!(mem_id, _mem_id_2); let _mem_id_3 = module .imported_memories .push((String::from(""), String::from(""))); assert_eq!(mem_id, _mem_id_3); module .exports .insert(name.to_string(), Export::Memory(mem_id)); } } let imports = Imports::new( dependencies, PrimaryMap::new(), PrimaryMap::new(), memories, PrimaryMap::new(), ); let data_initializers = Vec::new(); let signatures = PrimaryMap::new(); code_memory.publish(); let import_obj_state = ImportObjState { calls: bound_functions, code_memory, }; Ok(InstanceHandle::new( Rc::new(module), global_exports, finished_functions.into_boxed_slice(), imports, &data_initializers, signatures.into_boxed_slice(), None, Box::new(import_obj_state), ) .expect("instance")) } /// We don't expect trampoline compilation to produce any relocations, so /// this `RelocSink` just asserts that it doesn't recieve any. struct RelocSink {} impl binemit::RelocSink for RelocSink { fn reloc_ebb( &mut self, _offset: binemit::CodeOffset, _reloc: binemit::Reloc, _ebb_offset: binemit::CodeOffset, ) { panic!("trampoline compilation should not produce ebb relocs"); } fn reloc_external( &mut self, _offset: binemit::CodeOffset, _reloc: binemit::Reloc, _name: &ir::ExternalName, _addend: binemit::Addend, ) { panic!("trampoline compilation should not produce external symbol relocs"); } fn reloc_constant( &mut self, _code_offset: binemit::CodeOffset, _reloc: binemit::Reloc, _constant_offset: ir::ConstantOffset, ) { panic!("trampoline compilation should not produce constant relocs"); } fn reloc_jt( &mut self, _offset: binemit::CodeOffset, _reloc: binemit::Reloc, _jt: ir::JumpTable, ) { panic!("trampoline compilation should not produce jump table relocs"); } }
31.824
98
0.590582
bb7a66af669ff023f5c0b5d920fde01dee24f1ff
417
/* Check if a item is unique when compared to given number */ fn check_dup(v: Vec<i32>, num: i32) -> Result<bool, String> { let mut is_unique: bool = false; if !v.contains(&num) { is_unique = true; } Ok(is_unique) } fn main() { let v: Vec<i32> = vec![1, 4, 7, 34, 57]; let num: i32 = 567; println!("Is item unique? -> {}", check_dup(v, num).expect("not found")); }
18.130435
77
0.553957
098cc8d89eb361401377a3a19321c28135ab0bae
8,013
// Copyright 2022 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use common_datavalues::prelude::*; use common_exception::ErrorCode; use common_exception::Result; use super::xor::LogicXorFunction; use super::LogicAndFunction; use super::LogicNotFunction; use super::LogicOrFunction; use crate::scalars::cast_column_field; use crate::scalars::Function; use crate::scalars::FunctionFactory; #[derive(Clone)] pub struct LogicFunction { op: LogicOperator, } #[derive(Clone, Debug)] pub enum LogicOperator { Not, And, Or, Xor, } impl LogicFunction { pub fn try_create(op: LogicOperator) -> Result<Box<dyn Function>> { Ok(Box::new(Self { op })) } pub fn register(factory: &mut FunctionFactory) { factory.register("and", LogicAndFunction::desc()); factory.register("or", LogicOrFunction::desc()); factory.register("not", LogicNotFunction::desc()); factory.register("xor", LogicXorFunction::desc()); } fn eval_not(&self, columns: &ColumnsWithField, input_rows: usize) -> Result<ColumnRef> { let mut nullable = false; if columns[0].data_type().is_nullable() { nullable = true; } let dt = if nullable { Arc::new(NullableType::create(BooleanType::arc())) } else { BooleanType::arc() }; let col = cast_column_field(&columns[0], &dt)?; let col_viewer = bool::try_create_viewer(&col)?; if nullable { let mut builder = NullableColumnBuilder::<bool>::with_capacity(input_rows); for (idx, data) in col_viewer.iter().enumerate() { builder.append(!data, 
col_viewer.valid_at(idx)); } Ok(builder.build(input_rows)) } else { let mut builder = ColumnBuilder::<bool>::with_capacity(input_rows); for value in col_viewer.iter() { builder.append(!value); } Ok(builder.build(input_rows)) } } fn eval_and_not_or(&self, columns: &ColumnsWithField, input_rows: usize) -> Result<ColumnRef> { let mut nullable = false; if columns[0].data_type().is_nullable() || columns[1].data_type().is_nullable() { nullable = true; } let dt = if nullable { Arc::new(NullableType::create(BooleanType::arc())) } else { BooleanType::arc() }; let lhs = cast_column_field(&columns[0], &dt)?; let rhs = cast_column_field(&columns[1], &dt)?; if nullable { let lhs_viewer = bool::try_create_viewer(&lhs)?; let rhs_viewer = bool::try_create_viewer(&rhs)?; let lhs_viewer_iter = lhs_viewer.iter(); let rhs_viewer_iter = rhs_viewer.iter(); let mut builder = NullableColumnBuilder::<bool>::with_capacity(input_rows); macro_rules! calcute_with_null { ($lhs_viewer: expr, $rhs_viewer: expr, $lhs_viewer_iter: expr, $rhs_viewer_iter: expr, $builder: expr, $func: expr) => { for (a, (idx, b)) in $lhs_viewer_iter.zip($rhs_viewer_iter.enumerate()) { let (val, valid) = $func(a, b, $lhs_viewer.valid_at(idx), $rhs_viewer.valid_at(idx)); $builder.append(val, valid); } }; } match self.op { LogicOperator::And => calcute_with_null!( lhs_viewer, rhs_viewer, lhs_viewer_iter, rhs_viewer_iter, builder, |lhs: bool, rhs: bool, l_valid: bool, r_valid: bool| -> (bool, bool) { (lhs & rhs, l_valid & r_valid) } ), LogicOperator::Or => calcute_with_null!( lhs_viewer, rhs_viewer, lhs_viewer_iter, rhs_viewer_iter, builder, |lhs: bool, rhs: bool, _l_valid: bool, _r_valid: bool| -> (bool, bool) { (lhs || rhs, lhs || rhs) } ), LogicOperator::Xor => calcute_with_null!( lhs_viewer, rhs_viewer, lhs_viewer_iter, rhs_viewer_iter, builder, |lhs: bool, rhs: bool, l_valid: bool, r_valid: bool| -> (bool, bool) { (lhs ^ rhs, l_valid & r_valid) } ), LogicOperator::Not => return Err(ErrorCode::LogicalError("never 
happen")), }; Ok(builder.build(input_rows)) } else { let lhs_viewer = bool::try_create_viewer(&lhs)?; let rhs_viewer = bool::try_create_viewer(&rhs)?; let mut builder = ColumnBuilder::<bool>::with_capacity(input_rows); macro_rules! calcute { ($lhs_viewer: expr, $rhs_viewer: expr, $builder: expr, $func: expr) => { for (a, b) in ($lhs_viewer.iter().zip($rhs_viewer.iter())) { $builder.append($func(a, b)); } }; } match self.op { LogicOperator::And => calcute!( lhs_viewer, rhs_viewer, builder, |lhs: bool, rhs: bool| -> bool { lhs & rhs } ), LogicOperator::Or => calcute!( lhs_viewer, rhs_viewer, builder, |lhs: bool, rhs: bool| -> bool { lhs || rhs } ), LogicOperator::Xor => calcute!( lhs_viewer, rhs_viewer, builder, |lhs: bool, rhs: bool| -> bool { lhs ^ rhs } ), LogicOperator::Not => return Err(ErrorCode::LogicalError("never happen")), }; Ok(builder.build(input_rows)) } } } impl Function for LogicFunction { fn name(&self) -> &str { "LogicFunction" } fn return_type(&self, args: &[&DataTypePtr]) -> Result<DataTypePtr> { match self.op { LogicOperator::Not => { if args[0].is_nullable() { Ok(Arc::new(NullableType::create(BooleanType::arc()))) } else { Ok(BooleanType::arc()) } } _ => { if args[0].is_nullable() || args[1].is_nullable() { Ok(Arc::new(NullableType::create(BooleanType::arc()))) } else { Ok(BooleanType::arc()) } } } } fn eval(&self, columns: &ColumnsWithField, input_rows: usize) -> Result<ColumnRef> { match self.op { LogicOperator::Not => self.eval_not(columns, input_rows), _ => self.eval_and_not_or(columns, input_rows), } } fn passthrough_null(&self) -> bool { !matches!(self.op, LogicOperator::Or) } } impl std::fmt::Display for LogicFunction { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.op) } }
33.527197
137
0.512542
646a3fa97a3dcb1e0976512d333e4fe4f9ae7dbd
3,809
/* See LICENSE file for copyright and license details ** Authors: ** Todd Gaunt ** Description: ** This program implements the Jacobi algorithm concurrently to find the ** steady-state temperature distribution on an insulated ** two-dimensional plate, given constant boundary conditions. ** ** This version was created in November 2017, to compare to a C++ version ** Using Pthreads. */ extern crate time; use std::thread; use time::PreciseTime; use std::env; use std::process; const SIZE : usize = 1280; const TEMP : f64 = 50.0; const EPSILON : f64 = 0.1; fn usage() { eprint!("Usage: jacobi [NUMBER_OF_THREADS]\n"); } fn main () { let args: Vec<String> = env::args().collect(); if 2 != args.len() { usage(); process::exit(-1); } // Number of threads to use let n_threads = args[1].parse::<usize>().unwrap(); let start = PreciseTime::now(); let split = SIZE / n_threads; let mut new : Vec<Vec<f64>> = vec![vec![0.0; SIZE]; SIZE]; let mut old : Vec<Vec<f64>> = vec![vec![0.0; SIZE]; SIZE]; /* Initialize the temperatures */ for i in 1..SIZE { /* Inner core */ for j in 1..SIZE - 1 { old[i][j] = 50.0; new[i][j] = 50.0; } } for i in 1..SIZE - 1 { /* South Boundry */ old[SIZE - 1][i] = 100.0; new[SIZE - 1][i] = 100.0; } loop { let mut children = vec![]; for id in 0..n_threads { let old = old.clone(); let mut new = new.clone(); children.push(thread::spawn(move || { let mut beg = id * split; let mut end = beg + split; if id == 0 { beg += 1; } if id == n_threads - 1 { end -= 1; } for i in beg..end { for j in 1..SIZE - 1 { new[i][j] = (old[i - 1][j] + old[i + 1][j] + old[i][j + 1] + old[i][j - 1]) / 4.0; } } return new.clone(); })); } let mut maxerr = 0.0; let mut i = 0; for child in children { let rows = child.join().unwrap(); let mut beg = i * split; let mut end = beg + split; if i == 0 { beg += 1; } else if i == n_threads - 1 { end -= 1; } for j in beg..end { for k in 1..SIZE - 1 { new[j][k] = rows[j][k]; } } i += 1; } /* Print-out for debugging */ /* for i in 0..SIZE { for j in 
0..SIZE { print!("{} ", new[i][j]); } println!(); } */ for i in 1..SIZE - 1 { for j in 1..SIZE - 1 { let change = (old[i][j] - new[i][j]).abs(); if maxerr < change { maxerr = change; } } } if EPSILON >= maxerr { break; } for i in 1..SIZE - 1 { for j in 1..SIZE - 1 { old[i][j] = new[i][j]; } } } let mut cool_cells = 0; for i in 0..SIZE { for j in 0..SIZE { if new[i][j] < TEMP { cool_cells += 1 } } } let stop = PreciseTime::now(); let elapsed = start.to(stop).to_string(); println!("{}", &elapsed[2..]); eprintln!("There are {} cells cooler than {} degrees", cool_cells, TEMP); }
26.636364
74
0.407719
ab28143b41f0716dbec9a95f04dd73e4bd966713
37,925
#![allow(missing_docs)] use gl; use hal::{self, buffer, command, image, memory, pass, pso, query, ColorSlot}; use hal::format::ChannelType; use hal::range::RangeArg; use {native as n, Backend}; use pool::{self, BufferMemory}; use std::borrow::Borrow; use std::{mem, slice}; use std::ops::Range; use std::sync::{Arc, Mutex}; // Command buffer implementation details: // // The underlying commands and data are stored inside the associated command pool. // See the comments for further safety requirements. // Each command buffer holds a (growable) slice of the buffers in the pool. // // Command buffers are recorded one-after-another for each command pool. // Actual storage depends on the resetting behavior of the pool. /// The place of some data in a buffer. #[derive(Clone, Copy, PartialEq, Debug)] pub struct BufferSlice { pub offset: u32, pub size: u32, } impl BufferSlice { fn new() -> Self { BufferSlice { offset: 0, size: 0, } } // Append a data pointer, resulting in one data pointer // covering the whole memory region. 
fn append(&mut self, other: BufferSlice) { if self.size == 0 { // Empty or dummy pointer self.offset = other.offset; self.size = other.size; } else { assert_eq!(self.offset + self.size, other.offset); self.size += other.size; } } } /// #[derive(Clone, Debug)] pub enum Command { Dispatch(hal::WorkGroupCount), DispatchIndirect(gl::types::GLuint, buffer::Offset), Draw { primitive: gl::types::GLenum, vertices: Range<hal::VertexCount>, instances: Range<hal::InstanceCount>, }, DrawIndexed { primitive: gl::types::GLenum, index_type: gl::types::GLenum, index_count: hal::IndexCount, index_buffer_offset: buffer::Offset, base_vertex: hal::VertexOffset, instances: Range<hal::InstanceCount>, }, BindIndexBuffer(gl::types::GLuint), //BindVertexBuffers(BufferSlice), SetViewports { first_viewport: u32, viewport_ptr: BufferSlice, depth_range_ptr: BufferSlice, }, SetScissors(u32, BufferSlice), SetBlendColor(pso::ColorValue), /// Clear floating-point color drawbuffer of bound framebuffer. ClearBufferColorF(DrawBuffer, [f32; 4]), /// Clear unsigned integer color drawbuffer of bound framebuffer. ClearBufferColorU(DrawBuffer, [u32; 4]), /// Clear signed integer color drawbuffer of bound framebuffer. ClearBufferColorI(DrawBuffer, [i32; 4]), /// Clear depth-stencil drawbuffer of bound framebuffer. ClearBufferDepthStencil(Option<pso::DepthValue>, Option<pso::StencilValue>), /// Set list of color attachments for drawing. /// The buffer slice contains a list of `GLenum`. 
DrawBuffers(BufferSlice), BindFrameBuffer(FrameBufferTarget, n::FrameBuffer), BindTargetView(FrameBufferTarget, AttachmentPoint, n::ImageView), SetDrawColorBuffers(usize), SetPatchSize(gl::types::GLint), BindProgram(gl::types::GLuint), BindBlendSlot(ColorSlot, pso::ColorBlendDesc), BindAttribute(n::AttributeDesc, gl::types::GLuint, gl::types::GLsizei, n::VertexAttribFunction), //UnbindAttribute(n::AttributeDesc), CopyBufferToBuffer(n::RawBuffer, n::RawBuffer, command::BufferCopy), CopyBufferToTexture(n::RawBuffer, n::Texture, command::BufferImageCopy), CopyBufferToSurface(n::RawBuffer, n::Surface, command::BufferImageCopy), CopyTextureToBuffer(n::Texture, n::RawBuffer, command::BufferImageCopy), CopySurfaceToBuffer(n::Surface, n::RawBuffer, command::BufferImageCopy), CopyImageToTexture(n::ImageKind, n::Texture, command::ImageCopy), CopyImageToSurface(n::ImageKind, n::Surface, command::ImageCopy), } pub type FrameBufferTarget = gl::types::GLenum; pub type AttachmentPoint = gl::types::GLenum; pub type DrawBuffer = gl::types::GLint; #[derive(Clone)] struct AttachmentClear { subpass_id: Option<pass::SubpassId>, value: Option<command::ClearValueRaw>, stencil_value: Option<pso::StencilValue>, } #[derive(Clone)] pub struct RenderPassCache { render_pass: n::RenderPass, framebuffer: n::FrameBuffer, attachment_clears: Vec<AttachmentClear>, } // Cache current states of the command buffer #[derive(Clone)] struct Cache { // Active primitive topology, set by the current pipeline. primitive: Option<gl::types::GLenum>, // Active index type, set by the current index buffer. index_type: Option<hal::IndexType>, // Stencil reference values (front, back). stencil_ref: Option<(pso::StencilValue, pso::StencilValue)>, // Blend color. blend_color: Option<pso::ColorValue>, /// framebuffer: Option<(FrameBufferTarget, n::FrameBuffer)>, /// // Indicates that invalid commands have been recorded. error_state: bool, // Vertices per patch for tessellation primitives (patches). 
patch_size: Option<gl::types::GLint>, // Active program name. program: Option<gl::types::GLuint>, // Blend per attachment. blend_targets: Option<Vec<Option<pso::ColorBlendDesc>>>, // Maps bound vertex buffer offset (index) to handle. vertex_buffers: Vec<gl::types::GLuint>, // Active vertex buffer descriptions. vertex_buffer_descs: Vec<Option<pso::VertexBufferDesc>>, // Active attributes. attributes: Vec<n::AttributeDesc>, } impl Cache { pub fn new() -> Cache { Cache { primitive: None, index_type: None, stencil_ref: None, blend_color: None, framebuffer: None, error_state: false, patch_size: None, program: None, blend_targets: None, vertex_buffers: Vec::new(), vertex_buffer_descs: Vec::new(), attributes: Vec::new(), } } } // This is a subset of the device limits stripped down to the ones needed // for command buffer validation. #[derive(Debug, Clone, Copy)] pub struct Limits { max_viewports: usize, } impl From<hal::Limits> for Limits { fn from(l: hal::Limits) -> Self { Limits { max_viewports: l.max_viewports, } } } /// A command buffer abstraction for OpenGL. /// /// If you want to display your rendered results to a framebuffer created externally, see the /// `display_fb` field. #[derive(Clone)] pub struct RawCommandBuffer { pub(crate) memory: Arc<Mutex<BufferMemory>>, pub(crate) buf: BufferSlice, // Buffer id for the owning command pool. // Only relevant if individual resets are allowed. pub(crate) id: u64, individual_reset: bool, fbo: n::FrameBuffer, /// The framebuffer to use for rendering to the main targets (0 by default). /// /// Use this to set the framebuffer that will be used for the screen display targets created /// with `create_main_targets_raw`. Usually you don't need to set this field directly unless /// your OS doesn't provide a default framebuffer with name 0 and you have to render to a /// different framebuffer object that can be made visible on the screen (iOS/tvOS need this). 
/// /// This framebuffer must exist and be configured correctly (with renderbuffer attachments, /// etc.) so that rendering to it can occur immediately. pub display_fb: n::FrameBuffer, cache: Cache, pass_cache: Option<RenderPassCache>, cur_subpass: usize, limits: Limits, active_attribs: usize, } impl RawCommandBuffer { pub(crate) fn new( fbo: n::FrameBuffer, limits: Limits, memory: Arc<Mutex<BufferMemory>>, ) -> Self { let (id, individual_reset) = { let mut memory = memory .try_lock() .expect("Trying to allocate a command buffers, while memory is in-use."); match *memory { BufferMemory::Linear(_) => (0, false), BufferMemory::Individual { ref mut storage, ref mut next_buffer_id } => { // Add a new pair of buffers storage.insert(*next_buffer_id, pool::OwnedBuffer::new()); let id = *next_buffer_id; *next_buffer_id += 1; (id, true) } } }; RawCommandBuffer { memory, buf: BufferSlice::new(), id, individual_reset, fbo, display_fb: 0 as n::FrameBuffer, cache: Cache::new(), pass_cache: None, cur_subpass: !0, limits, active_attribs: 0, } } // Soft reset only the buffers, but doesn't free any memory or clears memory // of the owning pool. pub(crate) fn soft_reset(&mut self) { self.buf = BufferSlice::new(); self.cache = Cache::new(); self.pass_cache = None; self.cur_subpass = !0; } fn push_cmd(&mut self, cmd: Command) { push_cmd_internal(&self.id, &mut self.memory, &mut self.buf, cmd); } /// Copy a given vector slice into the data buffer. fn add<T>(&mut self, data: &[T]) -> BufferSlice { self.add_raw(unsafe { slice::from_raw_parts( data.as_ptr() as *const _, data.len() * mem::size_of::<T>(), ) }) } /// Copy a given u8 slice into the data buffer. fn add_raw(&mut self, data: &[u8]) -> BufferSlice { let mut memory = self .memory .try_lock() .expect("Trying to record a command buffers, while memory is in-use."); let data_buffer = match *memory { BufferMemory::Linear(ref mut buffer) => &mut buffer.data, BufferMemory::Individual { ref mut storage, .. 
} => { &mut storage.get_mut(&self.id).unwrap().data } }; data_buffer.extend_from_slice(data); let slice = BufferSlice { offset: (data_buffer.len() - data.len()) as u32, size: data.len() as u32, }; slice } fn update_blend_targets(&mut self, blend_targets: &Vec<pso::ColorBlendDesc>) { let max_blend_slots = blend_targets.len(); if max_blend_slots > 0 { match self.cache.blend_targets { Some(ref mut cached) => { if cached.len() < max_blend_slots { cached.resize(max_blend_slots, None); } } None => { self.cache.blend_targets = Some(vec![None; max_blend_slots]); } }; } for (slot, blend_target) in blend_targets.iter().enumerate() { let mut update_blend = false; if let Some(ref mut cached_targets) = self.cache.blend_targets { if let Some(cached_target) = cached_targets.get(slot) { match cached_target { &Some(ref cache) => { if cache != blend_target { update_blend = true; } } &None => { update_blend = true; } } } if update_blend { cached_targets[slot] = Some(*blend_target); } } if update_blend { self.push_cmd(Command::BindBlendSlot(slot as _, *blend_target)); } } } pub(crate) fn bind_attributes(&mut self) { let Cache { ref attributes, ref vertex_buffers, ref vertex_buffer_descs, .. } = self.cache; for attribute in attributes { let binding = attribute.binding as usize; if vertex_buffers.len() <= binding { error!("No vertex buffer bound at {}", binding); } let handle = vertex_buffers[binding]; match vertex_buffer_descs.get(binding) { Some(&Some(desc)) => { assert_eq!(desc.rate, 0); // TODO: Input rate push_cmd_internal( &self.id, &mut self.memory, &mut self.buf, Command::BindAttribute(*attribute, handle, desc.stride as _, attribute.vertex_attrib_fn) ); } _ => error!("No vertex buffer description bound at {}", binding), } } } fn begin_subpass(&mut self) { // Split processing and command recording due to borrowchk. 
let (draw_buffers, clear_cmds) = { let state = self.pass_cache.as_ref().unwrap(); let subpass = &state.render_pass.subpasses[self.cur_subpass]; // See `begin_renderpass_cache` for clearing strategy // Bind draw buffers for mapping color output locations with // framebuffer attachments. let draw_buffers = if state.framebuffer == n::DEFAULT_FRAMEBUFFER { // The default framebuffer is created by the driver // We don't have influence on its layout and we treat it as single image. // // TODO: handle case where we don't du double-buffering? vec![gl::BACK_LEFT] } else { subpass .color_attachments .iter() .map(|id| gl::COLOR_ATTACHMENT0 + *id as gl::types::GLenum) .collect::<Vec<_>>() }; let clear_cmds = state .render_pass .attachments .iter() .zip(state.attachment_clears.iter()) .filter_map(|(attachment, clear)| { // Check if the attachment is first used in this subpass if clear.subpass_id != Some(self.cur_subpass) { return None; } // View format needs to be known at this point. // All attachments specified in the renderpass must have a valid, // matching image view bound in the framebuffer. 
let view_format = attachment.format.unwrap(); // Clear color target if view_format.is_color() { if let Some(cv) = clear.value { let channel = view_format.base_format().1; let cmd = match channel { ChannelType::Unorm | ChannelType::Inorm | ChannelType::Ufloat | ChannelType::Float | ChannelType::Srgb | ChannelType::Uscaled | ChannelType::Iscaled => Command::ClearBufferColorF(0, unsafe { cv.color.float32 }), ChannelType::Uint => Command::ClearBufferColorU(0, unsafe { cv.color.uint32 }), ChannelType::Int => Command::ClearBufferColorI(0, unsafe { cv.color.int32 }), }; return Some(cmd); } } else { // Clear depth-stencil target let depth = if view_format.is_depth() { clear.value.map(|cv| unsafe { cv.depth_stencil.depth }) } else { None }; let stencil = if view_format.is_stencil() { clear.stencil_value } else { None }; if depth.is_some() || stencil.is_some() { return Some(Command::ClearBufferDepthStencil(depth, stencil)); } } None }) .collect::<Vec<_>>(); (draw_buffers, clear_cmds) }; // Record commands let draw_buffers = self.add(&draw_buffers); self.push_cmd(Command::DrawBuffers(draw_buffers)); for cmd in clear_cmds { self.push_cmd(cmd); } } } impl command::RawCommandBuffer<Backend> for RawCommandBuffer { fn begin( &mut self, _flags: hal::command::CommandBufferFlags, _inheritance_info: hal::command::CommandBufferInheritanceInfo<Backend> ) { // TODO: Implement flags! if self.individual_reset { // Implicit buffer reset when individual reset is set. self.reset(false); } else { self.soft_reset(); } } fn finish(&mut self) { // no-op } fn reset(&mut self, _release_resources: bool) { if !self.individual_reset { error!("Associated pool must allow individual resets."); return } self.soft_reset(); let mut memory = self .memory .try_lock() .expect("Trying to reset a command buffer, while memory is in-use."); match *memory { // Linear` can't have individual reset ability. BufferMemory::Linear(_) => unreachable!(), BufferMemory::Individual { ref mut storage, .. 
} => { // TODO: should use the `release_resources` and shrink the buffers? storage .get_mut(&self.id) .map(|buffer| { buffer.commands.clear(); buffer.data.clear(); }); } } } fn pipeline_barrier<'a, T>( &mut self, _stages: Range<hal::pso::PipelineStage>, _dependencies: memory::Dependencies, _barriers: T, ) where T: IntoIterator, T::Item: Borrow<memory::Barrier<'a, Backend>>, { // TODO } fn fill_buffer<R>(&mut self, _buffer: &n::Buffer, _range: R, _data: u32) where R: RangeArg<buffer::Offset>, { unimplemented!() } fn update_buffer(&mut self, _buffer: &n::Buffer, _offset: buffer::Offset, _data: &[u8]) { unimplemented!() } fn begin_render_pass<T>( &mut self, render_pass: &n::RenderPass, framebuffer: &n::FrameBuffer, _render_area: pso::Rect, clear_values: T, _first_subpass: command::SubpassContents, ) where T: IntoIterator, T::Item: Borrow<command::ClearValueRaw>, { // TODO: load ops: clearing strategy // 1. < GL 3.0 / GL ES 2.0: glClear, only single color attachment? // 2. = GL ES 2.0: glBindFramebuffer + glClear (no single draw buffer supported) // 3. >= GL 3.0 / GL ES 3.0: glBindFramerbuffer + glClearBuffer // // Clearing when entering a subpass: // * Acquire channel information from renderpass description to // select correct ClearBuffer variant. // * Check for attachment loading clearing strategy // TODO: store ops: // < GL 4.5: Ignore // >= GL 4.5: Invalidate framebuffer attachment when store op is `DONT_CARE`. // 2./3. 
self.push_cmd(Command::BindFrameBuffer(gl::DRAW_FRAMEBUFFER, *framebuffer)); let attachment_clears = render_pass.attachments .iter() .zip(clear_values.into_iter()) .enumerate() .map(|(i, (attachment, clear_value))| { AttachmentClear { subpass_id: render_pass.subpasses.iter().position(|sp| sp.is_using(i)), value: if attachment.ops.load == pass::AttachmentLoadOp::Clear { Some(*clear_value.borrow()) } else { None }, stencil_value: if attachment.stencil_ops.load == pass::AttachmentLoadOp::Clear { Some(unsafe { clear_value.borrow().depth_stencil.stencil }) } else { None }, } }).collect(); self.pass_cache = Some(RenderPassCache { render_pass: render_pass.clone(), framebuffer: *framebuffer, attachment_clears, }); // Enter first subpass self.cur_subpass = 0; self.begin_subpass(); } fn next_subpass(&mut self, _contents: command::SubpassContents) { unimplemented!() } fn end_render_pass(&mut self) { // TODO } fn clear_image<T>( &mut self, image: &n::Image, _: image::Layout, color: command::ClearColorRaw, _depth_stencil: command::ClearDepthStencilRaw, _subresource_ranges: T, ) where T: IntoIterator, T::Item: Borrow<image::SubresourceRange>, { // TODO: clearing strategies // 1. < GL 3.0 / GL ES 3.0: glClear // 2. < GL 4.4: glClearBuffer // 3. >= GL 4.4: glClearTexSubImage // 2. 
ClearBuffer // TODO: reset color mask let fbo = self.fbo; let view = match image.kind { n::ImageKind::Surface(id) => n::ImageView::Surface(id), n::ImageKind::Texture(id) => n::ImageView::Texture(id, 0), //TODO }; self.push_cmd(Command::BindFrameBuffer(gl::DRAW_FRAMEBUFFER, fbo)); self.push_cmd(Command::BindTargetView(gl::DRAW_FRAMEBUFFER, gl::COLOR_ATTACHMENT0, view)); self.push_cmd(Command::SetDrawColorBuffers(1)); match image.channel { ChannelType::Unorm | ChannelType::Inorm | ChannelType::Ufloat | ChannelType::Float | ChannelType::Srgb | ChannelType::Uscaled | ChannelType::Iscaled => self.push_cmd(Command::ClearBufferColorF(0, unsafe { color.float32 })), ChannelType::Uint => self.push_cmd(Command::ClearBufferColorU(0, unsafe { color.uint32 })), ChannelType::Int => self.push_cmd(Command::ClearBufferColorI(0, unsafe { color.int32 })), } } fn clear_attachments<T, U>(&mut self, _: T, _: U) where T: IntoIterator, T::Item: Borrow<command::AttachmentClear>, U: IntoIterator, U::Item: Borrow<pso::ClearRect>, { unimplemented!() } fn resolve_image<T>( &mut self, _src: &n::Image, _src_layout: image::Layout, _dst: &n::Image, _dst_layout: image::Layout, _regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageResolve>, { unimplemented!() } fn blit_image<T>( &mut self, _src: &n::Image, _src_layout: image::Layout, _dst: &n::Image, _dst_layout: image::Layout, _filter: image::Filter, _regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageBlit> { unimplemented!() } fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView<Backend>) { // TODO: how can we incorporate the buffer offset? 
if ibv.offset > 0 { warn!("Non-zero index buffer offset currently not handled."); } self.cache.index_type = Some(ibv.index_type); self.push_cmd(Command::BindIndexBuffer(ibv.buffer.raw)); } fn bind_vertex_buffers(&mut self, _first_binding: u32, vbs: hal::pso::VertexBufferSet<Backend>) { if vbs.0.len() == 0 { return } let needed_length = vbs.0.iter().map(|vb| vb.1).max().unwrap() + 1; self.cache.vertex_buffers.resize(needed_length as usize, 0); for vb in vbs.0 { self.cache.vertex_buffers[vb.1 as usize] = vb.0.raw; } } fn set_viewports<T>(&mut self, first_viewport: u32, viewports: T) where T: IntoIterator, T::Item: Borrow<pso::Viewport>, { // OpenGL has two functions for setting the viewports. // Configuring the rectangle area and setting the depth bounds are separated. // // We try to store everything into a contiguous block of memory, // which allows us to avoid memory allocations when executing the commands. let mut viewport_ptr = BufferSlice { offset: 0, size: 0 }; let mut depth_range_ptr = BufferSlice { offset: 0, size: 0 }; let mut len = 0; for viewport in viewports { let viewport = viewport.borrow(); let viewport_rect = &[viewport.rect.x as f32, viewport.rect.y as f32, viewport.rect.w as f32, viewport.rect.h as f32]; viewport_ptr.append(self.add::<f32>(viewport_rect)); let depth_range = &[viewport.depth.start as f64, viewport.depth.end as f64]; depth_range_ptr.append(self.add::<f64>(depth_range)); len += 1; } match len { 0 => { error!("Number of viewports can not be zero."); self.cache.error_state = true; } n if n + first_viewport as usize <= self.limits.max_viewports => { self.push_cmd(Command::SetViewports { first_viewport, viewport_ptr, depth_range_ptr }); } _ => { error!("Number of viewports and first viewport index exceed the number of maximum viewports"); self.cache.error_state = true; } } } fn set_scissors<T>(&mut self, first_scissor: u32, scissors: T) where T: IntoIterator, T::Item: Borrow<pso::Rect>, { let mut scissors_ptr = BufferSlice { offset: 0, 
size: 0 }; let mut len = 0; for scissor in scissors { let scissor = scissor.borrow(); let scissor = &[scissor.x as i32, scissor.y as i32, scissor.w as i32, scissor.h as i32]; scissors_ptr.append(self.add::<i32>(scissor)); len += 1; } match len { 0 => { error!("Number of scissors can not be zero."); self.cache.error_state = true; } n if n + first_scissor as usize <= self.limits.max_viewports => { self.push_cmd(Command::SetScissors(first_scissor, scissors_ptr)); } _ => { error!("Number of scissors and first scissor index exceed the maximum number of viewports"); self.cache.error_state = true; } } } fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) { assert!(!faces.is_empty()); let mut front = 0; let mut back = 0; if let Some((last_front, last_back)) = self.cache.stencil_ref { front = last_front; back = last_back; } if faces.contains(pso::Face::FRONT) { front = value; } if faces.contains(pso::Face::BACK) { back = value; } // Only cache the stencil references values until // we assembled all the pieces to set the stencil state // from the pipeline. 
self.cache.stencil_ref = Some((front, back)); } fn set_stencil_read_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) { unimplemented!(); } fn set_stencil_write_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) { unimplemented!(); } fn set_blend_constants(&mut self, cv: pso::ColorValue) { if self.cache.blend_color != Some(cv) { self.cache.blend_color = Some(cv); self.push_cmd(Command::SetBlendColor(cv)); } } fn set_depth_bounds(&mut self, _: Range<f32>) { warn!("Depth bounds test is not supported"); } fn set_line_width(&mut self, _width: f32) { unimplemented!() } fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) { unimplemented!() } fn bind_graphics_pipeline(&mut self, pipeline: &n::GraphicsPipeline) { let n::GraphicsPipeline { primitive, patch_size, program, ref blend_targets, ref attributes, ref vertex_buffers, } = *pipeline; if self.cache.primitive != Some(primitive) { self.cache.primitive = Some(primitive); } if self.cache.patch_size != patch_size { self.cache.patch_size = patch_size; if let Some(size) = patch_size { self.push_cmd(Command::SetPatchSize(size)); } } if self.cache.program != Some(program) { self.cache.program = Some(program); self.push_cmd(Command::BindProgram(program)); } self.cache.attributes = attributes.clone(); self.cache.vertex_buffer_descs = vertex_buffers.clone(); self.update_blend_targets(blend_targets); } fn bind_graphics_descriptor_sets<I, J>( &mut self, _layout: &n::PipelineLayout, _first_set: usize, _sets: I, _offsets: J, ) where I: IntoIterator, I::Item: Borrow<n::DescriptorSet>, J: IntoIterator, J::Item: Borrow<command::DescriptorSetOffset>, { // TODO } fn bind_compute_pipeline(&mut self, pipeline: &n::ComputePipeline) { let n::ComputePipeline { program, } = *pipeline; if self.cache.program != Some(program) { self.cache.program = Some(program); self.push_cmd(Command::BindProgram(program)); } } fn bind_compute_descriptor_sets<I, J>( &mut self, _layout: &n::PipelineLayout, _first_set: usize, _sets: I, 
_offsets: J, ) where I: IntoIterator, I::Item: Borrow<n::DescriptorSet>, J: IntoIterator, J::Item: Borrow<command::DescriptorSetOffset>, { // TODO } fn dispatch(&mut self, count: hal::WorkGroupCount) { self.push_cmd(Command::Dispatch(count)); } fn dispatch_indirect(&mut self, buffer: &n::Buffer, offset: buffer::Offset) { self.push_cmd(Command::DispatchIndirect(buffer.raw, offset)); } fn copy_buffer<T>(&mut self, src: &n::Buffer, dst: &n::Buffer, regions: T) where T: IntoIterator, T::Item: Borrow<command::BufferCopy>, { let old_offset = self.buf.offset; for region in regions { let r = region.borrow().clone(); let cmd = Command::CopyBufferToBuffer(src.raw, dst.raw, r); self.push_cmd(cmd); } if self.buf.offset == old_offset { error!("At least one region must be specified"); } } fn copy_image<T>( &mut self, src: &n::Image, _src_layout: image::Layout, dst: &n::Image, _dst_layout: image::Layout, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::ImageCopy>, { let old_offset = self.buf.offset; for region in regions { let r = region.borrow().clone(); let cmd = match dst.kind { n::ImageKind::Surface(s) => Command::CopyImageToSurface(src.kind, s, r), n::ImageKind::Texture(t) => Command::CopyImageToTexture(src.kind, t, r), }; self.push_cmd(cmd); } if self.buf.offset == old_offset { error!("At least one region must be specified"); } } fn copy_buffer_to_image<T>( &mut self, src: &n::Buffer, dst: &n::Image, _: image::Layout, regions: T, ) where T: IntoIterator, T::Item: Borrow<command::BufferImageCopy>, { let old_size = self.buf.size; for region in regions { let r = region.borrow().clone(); let cmd = match dst.kind { n::ImageKind::Surface(s) => Command::CopyBufferToSurface(src.raw, s, r), n::ImageKind::Texture(t) => Command::CopyBufferToTexture(src.raw, t, r), }; self.push_cmd(cmd); } if self.buf.size == old_size { error!("At least one region must be specified"); } } fn copy_image_to_buffer<T>( &mut self, src: &n::Image, _: image::Layout, dst: &n::Buffer, regions: T, 
) where T: IntoIterator, T::Item: Borrow<command::BufferImageCopy>, { let old_size = self.buf.size; for region in regions { let r = region.borrow().clone(); let cmd = match src.kind { n::ImageKind::Surface(s) => Command::CopySurfaceToBuffer(s, dst.raw, r), n::ImageKind::Texture(t) => Command::CopyTextureToBuffer(t, dst.raw, r), }; self.push_cmd(cmd); } if self.buf.size == old_size { error!("At least one region must be specified"); } } fn draw( &mut self, vertices: Range<hal::VertexCount>, instances: Range<hal::InstanceCount>, ) { self.bind_attributes(); match self.cache.primitive { Some(primitive) => { self.push_cmd( Command::Draw { primitive, vertices, instances, } ); } None => { warn!("No primitive bound. An active pipeline needs to be bound before calling `draw`."); self.cache.error_state = true; } } } fn draw_indexed( &mut self, indices: Range<hal::IndexCount>, base_vertex: hal::VertexOffset, instances: Range<hal::InstanceCount>, ) { self.bind_attributes(); let (start, index_type) = match self.cache.index_type { Some(hal::IndexType::U16) => (indices.start * 2, gl::UNSIGNED_SHORT), Some(hal::IndexType::U32) => (indices.start * 4, gl::UNSIGNED_INT), None => { warn!("No index type bound. An index buffer needs to be bound before calling `draw_indexed`."); self.cache.error_state = true; return; } }; match self.cache.primitive { Some(primitive) => { self.push_cmd( Command::DrawIndexed { primitive, index_type, index_count: indices.end - indices.start, index_buffer_offset: start as _, base_vertex, instances, } ); } None => { warn!("No primitive bound. 
An active pipeline needs to be bound before calling `draw_indexed`."); self.cache.error_state = true; } } } fn draw_indirect( &mut self, _buffer: &n::Buffer, _offset: buffer::Offset, _draw_count: hal::DrawCount, _stride: u32, ) { unimplemented!() } fn draw_indexed_indirect( &mut self, _buffer: &n::Buffer, _offset: buffer::Offset, _draw_count: hal::DrawCount, _stride: u32, ) { unimplemented!() } fn begin_query( &mut self, _query: query::Query<Backend>, _flags: query::QueryControl, ) { unimplemented!() } fn push_graphics_constants( &mut self, _layout: &n::PipelineLayout, _stages: pso::ShaderStageFlags, _offset: u32, _constants: &[u32], ) { unimplemented!() } fn end_query( &mut self, _query: query::Query<Backend>, ) { unimplemented!() } fn reset_query_pool( &mut self, _pool: &(), _queries: Range<query::QueryId>, ) { unimplemented!() } fn write_timestamp( &mut self, _: pso::PipelineStage, _: query::Query<Backend>, ) { unimplemented!() } fn push_compute_constants( &mut self, _layout: &n::PipelineLayout, _offset: u32, _constants: &[u32], ) { unimplemented!() } fn execute_commands<I>( &mut self, _buffers: I, ) where I: IntoIterator, I::Item: Borrow<RawCommandBuffer> { unimplemented!() } } /// Avoids creating second mutable borrows of `self` by requiring mutable /// references only to the fields it needs. Many functions will simply use /// `push_cmd`, but this is needed when the caller would like to perform a /// partial borrow to `self`. For example, iterating through a field on /// `self` and calling `self.push_cmd` per iteration. fn push_cmd_internal(id: &u64, memory: &mut Arc<Mutex<BufferMemory>>, buffer: &mut BufferSlice, cmd: Command) { let mut memory = memory .try_lock() .expect("Trying to record a command buffers, while memory is in-use."); let cmd_buffer = match *memory { BufferMemory::Linear(ref mut buffer) => &mut buffer.commands, BufferMemory::Individual { ref mut storage, .. 
} => { &mut storage.get_mut(id).unwrap().commands } }; cmd_buffer.push(cmd); buffer.append(BufferSlice { offset: cmd_buffer.len() as u32 - 1, size: 1, }); }
32.276596
130
0.547502
d74a2be018ea38fd5db1e862e6dd1b07851fb002
128
use validator::Validate; #[derive(Validate)] struct Test { #[validate(range(mi = 2, max = 3))] s: i32, } fn main() {}
12.8
39
0.578125
0127f10f6ff102fbcb3f3314ace099da9f82a5a5
33,305
//! Classic marching cubes cases. //! There are 256 possible combinations of the above/below iso-surface states for the 8 vertices //! of a cube. The following lookup table maps each combination to the corresponding triangulation. //! //! The index for a case is obtained with a bitfield of size 8, where a bit value of 1 //! indicates that the corresponding vertex of the cube is above the iso-surface threshold. //! Reversing the order of the bit pattern and interpreting it as an integer yields the case index. //! //! For each case, the triangulation is represented by a 16 element array containing successive //! index triplets for each required triangle. The indices refer to the corresponding edges that //! are intersected by the triangle. Each case has at most five triangles and unused entries of the //! 16 element arrays are filled with -1 entries for padding. //! //! Example: //! - Vertex 0 and 2 are above the iso-surface threshold. //! - The corresponding bit pattern is `10100000`, the corresponding index is 5 //! - The case with index 5 reads `[ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]` //! - The triangulation is given by the two triangles `[0, 8, 3]` and `[1, 2, 10]`, with vertices //! on the edges identified by the given indices //! //! Note that the raw table apparently uses a left-handed coordinate system and accordingly a //! clockwise winding order of the triangles. To avoid producing meshes with normals pointing into //! the reconstructed surface, the resulting triangles have to be flipped. This is already taken //! into account by the [`marching_cubes_triangulation_iter`] function. //! //! Cube description: //! //! ```text //! 7 ________ 6 _____6__ //! /| /| 7/| /| //! / | / | / | /5 | //! 4 /_______ / | /__4____ / 10 //! | | |5 | | 11 | | //! | 3|__|_____|2 | |__|__2__| //! | / | / 8 3/ 9 / //! | / | / | / | /1 //! |/_______|/ |/___0___|/ //! 0 1 //! Vertices Edges //!
``` /// The classic marching cubes table #[rustfmt::skip] static MARCHING_CUBES_TABLE: [[i32; 16]; 256] = [ /* 0: */ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 1: 0, */ [ 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 2: 1, */ [ 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 3: 0, 1, */ [ 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 4: 2, */ [ 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 5: 0, 2, */ [ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 6: 1, 2, */ [ 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 7: 0, 1, 2, */ [ 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1], /* 8: 3, */ [ 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 9: 0, 3, */ [ 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 10: 1, 3, */ [ 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 11: 0, 1, 3, */ [ 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1], /* 12: 2, 3, */ [ 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 13: 0, 2, 3, */ [ 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1], /* 14: 1, 2, 3, */ [ 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1], /* 15: 0, 1, 2, 3, */ [ 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 16: 4, */ [ 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 17: 0, 4, */ [ 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 18: 1, 4, */ [ 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 19: 0, 1, 4, */ [ 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1], /* 20: 2, 4, */ [ 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 21: 0, 2, 4, */ [ 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1], /* 22: 1, 2, 4, */ [ 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1], /* 23: 0, 1, 2, 4, */ [ 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1], /* 24: 3, 4, */ [ 
8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 25: 0, 3, 4, */ [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1], /* 26: 1, 3, 4, */ [ 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1], /* 27: 0, 1, 3, 4, */ [ 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1], /* 28: 2, 3, 4, */ [ 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1], /* 29: 0, 2, 3, 4, */ [ 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1], /* 30: 1, 2, 3, 4, */ [ 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1], /* 31: 0, 1, 2, 3, 4, */ [ 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1], /* 32: 5, */ [ 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 33: 0, 5, */ [ 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 34: 1, 5, */ [ 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 35: 0, 1, 5, */ [ 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1], /* 36: 2, 5, */ [ 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 37: 0, 2, 5, */ [ 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1], /* 38: 1, 2, 5, */ [ 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1], /* 39: 0, 1, 2, 5, */ [ 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1], /* 40: 3, 5, */ [ 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 41: 0, 3, 5, */ [ 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1], /* 42: 1, 3, 5, */ [ 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1], /* 43: 0, 1, 3, 5, */ [ 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1], /* 44: 2, 3, 5, */ [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1], /* 45: 0, 2, 3, 5, */ [ 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1], /* 46: 1, 2, 3, 5, */ [ 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1], /* 47: 0, 1, 2, 3, 5, */ [ 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1], /* 48: 4, 5, */ [ 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 49: 0, 4, 5, */ [ 9, 3, 0, 9, 5, 3, 
5, 7, 3, -1, -1, -1, -1, -1, -1, -1], /* 50: 1, 4, 5, */ [ 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1], /* 51: 0, 1, 4, 5, */ [ 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 52: 2, 4, 5, */ [ 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1], /* 53: 0, 2, 4, 5, */ [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1], /* 54: 1, 2, 4, 5, */ [ 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1], /* 55: 0, 1, 2, 4, 5, */ [ 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1], /* 56: 3, 4, 5, */ [ 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1], /* 57: 0, 3, 4, 5, */ [ 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1], /* 58: 1, 3, 4, 5, */ [ 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1], /* 59: 0, 1, 3, 4, 5, */ [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1], /* 60: 2, 3, 4, 5, */ [ 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1], /* 61: 0, 2, 3, 4, 5, */ [ 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1], /* 62: 1, 2, 3, 4, 5, */ [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1], /* 63: 0, 1, 2, 3, 4, 5, */ [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 64: 6, */ [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 65: 0, 6, */ [ 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 66: 1, 6, */ [ 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 67: 0, 1, 6, */ [ 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1], /* 68: 2, 6, */ [ 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 69: 0, 2, 6, */ [ 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1], /* 70: 1, 2, 6, */ [ 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1], /* 71: 0, 1, 2, 6, */ [ 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1], /* 72: 3, 6, */ [ 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 73: 0, 3, 6, */ [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1], /* 74: 1, 3, 6, */ [ 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, 
-1, -1, -1, -1, -1], /* 75: 0, 1, 3, 6, */ [ 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1], /* 76: 2, 3, 6, */ [ 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1], /* 77: 0, 2, 3, 6, */ [ 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1], /* 78: 1, 2, 3, 6, */ [ 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1], /* 79: 0, 1, 2, 3, 6, */ [ 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1], /* 80: 4, 6, */ [ 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 81: 0, 4, 6, */ [ 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1], /* 82: 1, 4, 6, */ [ 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1], /* 83: 0, 1, 4, 6, */ [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1], /* 84: 2, 4, 6, */ [ 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1], /* 85: 0, 2, 4, 6, */ [ 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1], /* 86: 1, 2, 4, 6, */ [ 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1], /* 87: 0, 1, 2, 4, 6, */ [ 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1], /* 88: 3, 4, 6, */ [ 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1], /* 89: 0, 3, 4, 6, */ [ 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1], /* 90: 1, 3, 4, 6, */ [ 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1], /* 91: 0, 1, 3, 4, 6, */ [ 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1], /* 92: 2, 3, 4, 6, */ [ 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1], /* 93: 0, 2, 3, 4, 6, */ [ 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1], /* 94: 1, 2, 3, 4, 6, */ [ 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1], /* 95: 0, 1, 2, 3, 4, 6, */ [ 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1], /* 96: 5, 6, */ [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 97: 0, 5, 6, */ [ 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1], /* 98: 1, 5, 6, */ [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1], /* 99: 0, 1, 5, 6, */ [ 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1], /* 
100: 2, 5, 6, */ [ 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1], /* 101: 0, 2, 5, 6, */ [ 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1], /* 102: 1, 2, 5, 6, */ [ 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 103: 0, 1, 2, 5, 6, */ [ 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1], /* 104: 3, 5, 6, */ [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1], /* 105: 0, 3, 5, 6, */ [ 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1], /* 106: 1, 3, 5, 6, */ [ 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1], /* 107: 0, 1, 3, 5, 6, */ [ 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1], /* 108: 2, 3, 5, 6, */ [ 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1], /* 109: 0, 2, 3, 5, 6, */ [ 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1], /* 110: 1, 2, 3, 5, 6, */ [ 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1], /* 111: 0, 1, 2, 3, 5, 6, */ [ 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 112: 4, 5, 6, */ [ 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1], /* 113: 0, 4, 5, 6, */ [ 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1], /* 114: 1, 4, 5, 6, */ [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1], /* 115: 0, 1, 4, 5, 6, */ [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1], /* 116: 2, 4, 5, 6, */ [ 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1], /* 117: 0, 2, 4, 5, 6, */ [ 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1], /* 118: 1, 2, 4, 5, 6, */ [ 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1], /* 119: 0, 1, 2, 4, 5, 6, */ [ 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 120: 3, 4, 5, 6, */ [ 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1], /* 121: 0, 3, 4, 5, 6, */ [ 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1], /* 122: 1, 3, 4, 5, 6, */ [ 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1], /* 123: 0, 1, 3, 4, 5, 6, */ [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1], /* 124: 2, 3, 4, 5, 6, */ [ 8, 9, 6, 
8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1], /* 125: 0, 2, 3, 4, 5, 6, */ [ 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 126: 1, 2, 3, 4, 5, 6, */ [ 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1], /* 127: 0, 1, 2, 3, 4, 5, 6, */ [ 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 128: 7, */ [ 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 129: 0, 7, */ [ 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 130: 1, 7, */ [ 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 131: 0, 1, 7, */ [ 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1], /* 132: 2, 7, */ [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 133: 0, 2, 7, */ [ 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1], /* 134: 1, 2, 7, */ [ 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1], /* 135: 0, 1, 2, 7, */ [ 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1], /* 136: 3, 7, */ [ 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 137: 0, 3, 7, */ [ 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1], /* 138: 1, 3, 7, */ [ 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1], /* 139: 0, 1, 3, 7, */ [ 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1], /* 140: 2, 3, 7, */ [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1], /* 141: 0, 2, 3, 7, */ [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1], /* 142: 1, 2, 3, 7, */ [ 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1], /* 143: 0, 1, 2, 3, 7, */ [ 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1], /* 144: 4, 7, */ [ 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 145: 0, 4, 7, */ [ 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1], /* 146: 1, 4, 7, */ [ 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1], /* 147: 0, 1, 4, 7, */ [ 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1], /* 148: 2, 4, 7, */ [ 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1], /* 149: 0, 
2, 4, 7, */ [ 1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1], /* 150: 1, 2, 4, 7, */ [ 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1], /* 151: 0, 1, 2, 4, 7, */ [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1], /* 152: 3, 4, 7, */ [ 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1], /* 153: 0, 3, 4, 7, */ [ 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 154: 1, 3, 4, 7, */ [ 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1], /* 155: 0, 1, 3, 4, 7, */ [ 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1], /* 156: 2, 3, 4, 7, */ [ 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1], /* 157: 0, 2, 3, 4, 7, */ [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1], /* 158: 1, 2, 3, 4, 7, */ [ 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1], /* 159: 0, 1, 2, 3, 4, 7, */ [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 160: 5, 7, */ [ 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 161: 0, 5, 7, */ [ 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1], /* 162: 1, 5, 7, */ [ 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1], /* 163: 0, 1, 5, 7, */ [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1], /* 164: 2, 5, 7, */ [ 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1], /* 165: 0, 2, 5, 7, */ [ 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1], /* 166: 1, 2, 5, 7, */ [ 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1], /* 167: 0, 1, 2, 5, 7, */ [ 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1], /* 168: 3, 5, 7, */ [ 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1], /* 169: 0, 3, 5, 7, */ [ 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1], /* 170: 1, 3, 5, 7, */ [ 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1], /* 171: 0, 1, 3, 5, 7, */ [ 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1], /* 172: 2, 3, 5, 7, */ [ 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1], /* 173: 0, 2, 3, 5, 7, */ [ 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1], /* 
174: 1, 2, 3, 5, 7, */ [ 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1], /* 175: 0, 1, 2, 3, 5, 7, */ [ 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1], /* 176: 4, 5, 7, */ [ 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1], /* 177: 0, 4, 5, 7, */ [ 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1], /* 178: 1, 4, 5, 7, */ [ 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1], /* 179: 0, 1, 4, 5, 7, */ [ 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1], /* 180: 2, 4, 5, 7, */ [ 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1], /* 181: 0, 2, 4, 5, 7, */ [ 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1], /* 182: 1, 2, 4, 5, 7, */ [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1], /* 183: 0, 1, 2, 4, 5, 7, */ [ 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1], /* 184: 3, 4, 5, 7, */ [ 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1], /* 185: 0, 3, 4, 5, 7, */ [ 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1], /* 186: 1, 3, 4, 5, 7, */ [ 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1], /* 187: 0, 1, 3, 4, 5, 7, */ [ 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 188: 2, 3, 4, 5, 7, */ [ 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1], /* 189: 0, 2, 3, 4, 5, 7, */ [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1], /* 190: 1, 2, 3, 4, 5, 7, */ [ 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 191: 0, 1, 2, 3, 4, 5, 7, */ [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 192: 6, 7, */ [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 193: 0, 6, 7, */ [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1], /* 194: 1, 6, 7, */ [ 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1], /* 195: 0, 1, 6, 7, */ [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1], /* 196: 2, 6, 7, */ [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1], /* 197: 0, 2, 6, 7, */ [ 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1], /* 198: 1, 2, 6, 
7, */ [ 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1], /* 199: 0, 1, 2, 6, 7, */ [ 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1], /* 200: 3, 6, 7, */ [ 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1], /* 201: 0, 3, 6, 7, */ [ 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1], /* 202: 1, 3, 6, 7, */ [ 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1], /* 203: 0, 1, 3, 6, 7, */ [ 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1], /* 204: 2, 3, 6, 7, */ [ 1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 205: 0, 2, 3, 6, 7, */ [ 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1], /* 206: 1, 2, 3, 6, 7, */ [ 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1], /* 207: 0, 1, 2, 3, 6, 7, */ [ 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 208: 4, 6, 7, */ [ 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1], /* 209: 0, 4, 6, 7, */ [ 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1], /* 210: 1, 4, 6, 7, */ [ 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1], /* 211: 0, 1, 4, 6, 7, */ [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1], /* 212: 2, 4, 6, 7, */ [ 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1], /* 213: 0, 2, 4, 6, 7, */ [ 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1], /* 214: 1, 2, 4, 6, 7, */ [ 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1], /* 215: 0, 1, 2, 4, 6, 7, */ [ 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 216: 3, 4, 6, 7, */ [ 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1], /* 217: 0, 3, 4, 6, 7, */ [ 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1], /* 218: 1, 3, 4, 6, 7, */ [ 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1], /* 219: 0, 1, 3, 4, 6, 7, */ [ 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1], /* 220: 2, 3, 4, 6, 7, */ [ 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1], /* 221: 0, 2, 3, 4, 6, 7, */ [ 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 222: 1, 2, 3, 4, 6, 7, */ [ 8, 4, 
5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1], /* 223: 0, 1, 2, 3, 4, 6, 7, */ [ 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 224: 5, 6, 7, */ [ 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1], /* 225: 0, 5, 6, 7, */ [ 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1], /* 226: 1, 5, 6, 7, */ [ 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1], /* 227: 0, 1, 5, 6, 7, */ [ 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1], /* 228: 2, 5, 6, 7, */ [ 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1], /* 229: 0, 2, 5, 6, 7, */ [ 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1], /* 230: 1, 2, 5, 6, 7, */ [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1], /* 231: 0, 1, 2, 5, 6, 7, */ [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1], /* 232: 3, 5, 6, 7, */ [ 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1], /* 233: 0, 3, 5, 6, 7, */ [ 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1], /* 234: 1, 3, 5, 6, 7, */ [ 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1], /* 235: 0, 1, 3, 5, 6, 7, */ [ 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 236: 2, 3, 5, 6, 7, */ [ 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1], /* 237: 0, 2, 3, 5, 6, 7, */ [ 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1], /* 238: 1, 2, 3, 5, 6, 7, */ [ 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 239: 0, 1, 2, 3, 5, 6, 7, */ [ 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 240: 4, 5, 6, 7, */ [ 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 241: 0, 4, 5, 6, 7, */ [ 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1], /* 242: 1, 4, 5, 6, 7, */ [ 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1], /* 243: 0, 1, 4, 5, 6, 7, */ [ 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 244: 2, 4, 5, 6, 7, */ [ 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1], /* 245: 0, 2, 4, 5, 6, 7, */ [ 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, 
-1, -1, -1, -1], /* 246: 1, 2, 4, 5, 6, 7, */ [ 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 247: 0, 1, 2, 4, 5, 6, 7, */ [ 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 248: 3, 4, 5, 6, 7, */ [ 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1], /* 249: 0, 3, 4, 5, 6, 7, */ [ 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 250: 1, 3, 4, 5, 6, 7, */ [ 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1], /* 251: 0, 1, 3, 4, 5, 6, 7, */ [ 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 252: 2, 3, 4, 5, 6, 7, */ [ 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 253: 0, 2, 3, 4, 5, 6, 7, */ [ 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 254: 1, 2, 3, 4, 5, 6, 7, */ [ 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], /* 255: 0, 1, 2, 3, 4, 5, 6, 7, */ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ]; /// Returns a reference into the marching cubes LUT to the case corresponding to the given vertex configuration pub fn get_marching_cubes_triangulation_raw(vertices_inside: &[bool; 8]) -> &'static [i32; 16] { let index = flags_to_index(vertices_inside); &MARCHING_CUBES_TABLE[index] } /// Returns the marching cubes triangulation corresponding to the given vertex configuration /// /// In the vertex configuration, a `true` value indicates that the given vertex is inside the /// iso-surface, i.e. above the iso-surface threshold value. The returned iterator yields /// at most 5 triangles defined by the indices of the edges of their corner vertices. 
pub fn marching_cubes_triangulation_iter( vertices_inside: &[bool; 8], ) -> impl Iterator<Item = [i32; 3]> { let triangulation = get_marching_cubes_triangulation_raw(vertices_inside); (0..5) .into_iter() .map(move |i| triangulation_to_triangle(triangulation, i)) .flatten() } /// Converts an array of bool representing bits to the corresponding usize, the order of the bits is least to most significant fn flags_to_index(flags: &[bool; 8]) -> usize { let mut index = 0; for &bit in flags.iter().rev() { index = (index << 1) | bit as usize } index } /// Extracts the triangle with the given index from the triangulation fn triangulation_to_triangle(triangulation: &[i32; 16], triangle_index: usize) -> Option<[i32; 3]> { let i = triangle_index; if triangulation[3 * i] == -1 { None } else { // Reverse the vertex index order to fix winding order (so that normals point outwards) Some([ triangulation[3 * i + 2], triangulation[3 * i + 1], triangulation[3 * i + 0], ]) } } #[cfg(test)] #[allow(unused)] mod test_lut { use super::*; /// A dumb integer -> bit flags conversion using format! 
fn index_to_flags(index: usize) -> [bool; 8] { assert!(index <= 256); let b: Vec<char> = format!("{:08b}", index).chars().collect(); [ b[7] == '1', b[6] == '1', b[5] == '1', b[4] == '1', b[3] == '1', b[2] == '1', b[1] == '1', b[0] == '1', ] } /// Inverts all bools in a flag array fn inverse_flags(flags: &[bool; 8]) -> [bool; 8] { [ !flags[0], !flags[1], !flags[2], !flags[3], !flags[4], !flags[5], !flags[6], !flags[7], ] } #[test] fn test_flag_conversion_roundtrip() { assert_eq!(MARCHING_CUBES_TABLE.len(), 256); for i in 0..256 { let flags = index_to_flags(i); let index = flags_to_index(&flags); assert_eq!(i, index); } } #[test] fn test_get_marching_cubes_triangulation_raw() { assert_eq!(MARCHING_CUBES_TABLE.len(), 256); for i in 0..256 { assert_eq!( MARCHING_CUBES_TABLE[i], *get_marching_cubes_triangulation_raw(&index_to_flags(i)) ) } } #[test] fn test_get_marching_cubes_triangulation_iter() { assert_eq!(MARCHING_CUBES_TABLE.len(), 256); for i in 0..256 { let flags = index_to_flags(i); let raw = get_marching_cubes_triangulation_raw(&flags); let mut tri_counter = 0; for tri in marching_cubes_triangulation_iter(&flags) { let mut vec_raw = raw[3 * tri_counter..3 * tri_counter + 3].to_vec(); let mut vec_tri = tri.to_vec(); vec_raw.sort(); vec_tri.sort(); assert_eq!(vec_raw, vec_tri); tri_counter += 1; } assert_eq!( raw[3 * tri_counter], -1, "There are more triangles in the raw case then returned by the iterator!" ) } } #[test] fn test_marching_cubes_triangulation_iter() { assert!(marching_cubes_triangulation_iter(&[ false, false, false, false, false, false, false, false ]) .next() .is_none(),); assert_eq!( marching_cubes_triangulation_iter(&[ true, false, false, false, false, false, false, false ]) .collect::<Vec<_>>(), vec![[3, 8, 0]] ); assert_eq!( marching_cubes_triangulation_iter(&[ false, false, true, false, true, false, false, false ]) .collect::<Vec<_>>(), vec![[10, 2, 1], [7, 4, 8]] ); } }
73.359031
126
0.304759
fbeda43af42b0aef48fa2cff6a4fed28e2651e13
90,156
use self::RecursiveTypeDescription::*; use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; use super::utils::{debug_context, DIB, span_start, get_namespace_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; use crate::abi; use crate::value::Value; use rustc_codegen_ssa::traits::*; use crate::llvm; use crate::llvm::debuginfo::{DIArray, DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock, DIFlags, DebugEmissionKind}; use crate::llvm_util; use crate::common::CodegenCx; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def::CtorKind; use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; use rustc::ich::NodeIdHashingMode; use rustc::mir::Field; use rustc::mir::GeneratorLayout; use rustc::mir::interpret::truncate; use rustc_data_structures::fingerprint::Fingerprint; use rustc::ty::Instance; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; use rustc::ty::layout::{self, Align, Integer, IntegerExt, LayoutOf, PrimitiveExt, Size, TyLayout, VariantIdx}; use rustc::ty::subst::UnpackedKind; use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::FxHashMap; use rustc_fs_util::path_to_c_string; use rustc_data_structures::small_c_str::SmallCStr; use rustc_target::abi::HasDataLayout; use libc::{c_uint, c_longlong}; use std::collections::hash_map::Entry; use std::ffi::CString; use std::fmt::{self, Write}; use std::hash::{Hash, Hasher}; use std::iter; use std::ptr; use std::path::{Path, PathBuf}; use syntax::ast; use syntax::symbol::{Interner, InternedString}; use syntax_pos::{self, Span, FileName}; impl PartialEq for llvm::Metadata { fn eq(&self, other: &Self) -> bool { ptr::eq(self, other) } } impl Eq for llvm::Metadata {} impl Hash for llvm::Metadata { fn hash<H: Hasher>(&self, hasher: &mut H) { (self as *const 
Self).hash(hasher); } } impl fmt::Debug for llvm::Metadata { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (self as *const Self).fmt(f) } } // From DWARF 5. // See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1 const DW_LANG_RUST: c_uint = 0x1c; #[allow(non_upper_case_globals)] const DW_ATE_boolean: c_uint = 0x02; #[allow(non_upper_case_globals)] const DW_ATE_float: c_uint = 0x04; #[allow(non_upper_case_globals)] const DW_ATE_signed: c_uint = 0x05; #[allow(non_upper_case_globals)] const DW_ATE_unsigned: c_uint = 0x07; #[allow(non_upper_case_globals)] const DW_ATE_unsigned_char: c_uint = 0x08; pub const UNKNOWN_LINE_NUMBER: c_uint = 0; pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0; pub const NO_SCOPE_METADATA: Option<&DIScope> = None; #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] pub struct UniqueTypeId(ast::Name); // The TypeMap is where the CrateDebugContext holds the type metadata nodes // created so far. The metadata nodes are indexed by UniqueTypeId, and, for // faster lookup, also by Ty. The TypeMap is responsible for creating // UniqueTypeIds. #[derive(Default)] pub struct TypeMap<'ll, 'tcx> { // The UniqueTypeIds created so far unique_id_interner: Interner, // A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping. unique_id_to_metadata: FxHashMap<UniqueTypeId, &'ll DIType>, // A map from types to debuginfo metadata. This is a N:1 mapping. type_to_metadata: FxHashMap<Ty<'tcx>, &'ll DIType>, // A map from types to UniqueTypeId. This is a N:1 mapping. type_to_unique_id: FxHashMap<Ty<'tcx>, UniqueTypeId> } impl TypeMap<'ll, 'tcx> { // Adds a Ty to metadata mapping to the TypeMap. The method will fail if // the mapping already exists. 
fn register_type_with_metadata(
    &mut self,
    type_: Ty<'tcx>,
    metadata: &'ll DIType,
) {
    // Double registration of the same `Ty` indicates a compiler bug, so
    // abort loudly instead of silently overwriting the old metadata.
    if self.type_to_metadata.insert(type_, metadata).is_some() {
        bug!("Type metadata for Ty '{}' is already in the TypeMap!", type_);
    }
}

// Removes a Ty to metadata mapping
// This is useful when computing the metadata for a potentially
// recursive type (e.g. a function ptr of the form:
//
// fn foo() -> impl Copy { foo }
//
// This kind of type cannot be properly represented
// via LLVM debuginfo. As a workaround,
// we register a temporary Ty to metadata mapping
// for the function before we compute its actual metadata.
// If the metadata computation ends up recursing back to the
// original function, it will use the temporary mapping
// for the inner self-reference, preventing us from
// recursing forever.
//
// This function is used to remove the temporary metadata
// mapping after we've computed the actual metadata
fn remove_type(
    &mut self,
    type_: Ty<'tcx>,
) {
    // Removing a mapping that was never registered is likewise a bug.
    if self.type_to_metadata.remove(type_).is_none() {
        bug!("Type metadata Ty '{}' is not in the TypeMap!", type_);
    }
}

// Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will
// fail if the mapping already exists.
fn register_unique_id_with_metadata(
    &mut self,
    unique_type_id: UniqueTypeId,
    metadata: &'ll DIType,
) {
    if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() {
        bug!("Type metadata for unique id '{}' is already in the TypeMap!",
             self.get_unique_type_id_as_string(unique_type_id));
    }
}

// Returns the metadata previously registered for `type_`, if any.
fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<&'ll DIType> {
    self.type_to_metadata.get(&type_).cloned()
}

// Returns the metadata previously registered for `unique_type_id`, if any.
fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<&'ll DIType> {
    self.unique_id_to_metadata.get(&unique_type_id).cloned()
}

// Get the string representation of a UniqueTypeId. This method will fail if
    fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str {
        let UniqueTypeId(interner_key) = unique_type_id;
        self.unique_id_interner.get(interner_key)
    }

    // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
    // type has been requested before, this is just a table lookup. Otherwise an
    // ID will be generated and stored for later lookup.
    fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>,
                                      type_: Ty<'tcx>) -> UniqueTypeId {
        // Let's see if we already have something in the cache
        if let Some(unique_type_id) = self.type_to_unique_id.get(&type_).cloned() {
            return unique_type_id;
        }
        // if not, generate one

        // The hasher we are using to generate the UniqueTypeId. We want
        // something that provides more than the 64 bits of the DefaultHasher.
        let mut hasher = StableHasher::<Fingerprint>::new();
        let mut hcx = cx.tcx.create_stable_hashing_context();
        // NOTE: regions are erased before hashing, so types that differ only in
        // their region arguments map to the same UniqueTypeId.
        let type_ = cx.tcx.erase_regions(&type_);
        hcx.while_hashing_spans(false, |hcx| {
            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
                type_.hash_stable(hcx, &mut hasher);
            });
        });
        let unique_type_id = hasher.finish().to_hex();

        let key = self.unique_id_interner.intern(&unique_type_id);
        self.type_to_unique_id.insert(type_, UniqueTypeId(key));

        return UniqueTypeId(key);
    }

    // Get the UniqueTypeId for an enum variant. Enum variants are not really
    // types of their own, so they need special handling. We still need a
    // UniqueTypeId for them, since to debuginfo they *are* real types.
    // The variant id is derived from the enum's id plus the variant name.
    fn get_unique_type_id_of_enum_variant<'a>(&mut self,
                                              cx: &CodegenCx<'a, 'tcx>,
                                              enum_type: Ty<'tcx>,
                                              variant_name: &str)
                                              -> UniqueTypeId {
        let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type);
        let enum_variant_type_id = format!("{}::{}",
                                           self.get_unique_type_id_as_string(enum_type_id),
                                           variant_name);
        let interner_key = self.unique_id_interner.intern(&enum_variant_type_id);
        UniqueTypeId(interner_key)
    }

    // Get the unique type id string for an enum variant part.
    // Variant parts are not types and shouldn't really have their own id,
    // but it makes set_members_of_composite_type() simpler.
    fn get_unique_type_id_str_of_enum_variant_part(&mut self,
                                                   enum_type_id: UniqueTypeId) -> &str {
        let variant_part_type_id = format!("{}_variant_part",
                                           self.get_unique_type_id_as_string(enum_type_id));
        let interner_key = self.unique_id_interner.intern(&variant_part_type_id);
        self.unique_id_interner.get(interner_key)
    }
}

// A description of some recursive type. It can either be already finished (as
// with FinalMetadata) or it is not yet finished, but contains all information
// needed to generate the missing parts of the description. See the
// documentation section on Recursive Types at the top of this file for more
// information.
enum RecursiveTypeDescription<'ll, 'tcx> {
    UnfinishedMetadata {
        unfinished_type: Ty<'tcx>,
        unique_type_id: UniqueTypeId,
        metadata_stub: &'ll DICompositeType,
        member_holding_stub: &'ll DICompositeType,
        member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
    },
    FinalMetadata(&'ll DICompositeType)
}

// Registers `metadata_stub` under both the unique id and the Ty in the
// TypeMap (so that recursive references can find it) and packages everything
// needed to complete the description later into an `UnfinishedMetadata`.
fn create_and_register_recursive_type_forward_declaration(
    cx: &CodegenCx<'ll, 'tcx>,
    unfinished_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    metadata_stub: &'ll DICompositeType,
    member_holding_stub: &'ll DICompositeType,
    member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
) -> RecursiveTypeDescription<'ll, 'tcx> {

    // Insert the stub into the TypeMap in order to allow for recursive references
    let mut type_map = debug_context(cx).type_map.borrow_mut();
    type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub);
    type_map.register_type_with_metadata(unfinished_type, metadata_stub);

    UnfinishedMetadata {
        unfinished_type,
        unique_type_id,
        metadata_stub,
        member_holding_stub,
        member_description_factory,
    }
}

impl RecursiveTypeDescription<'ll, 'tcx> {
    // Finishes up the description of the type in question (mostly by providing
    // descriptions of the fields of the given type) and returns the final type
    // metadata.
    fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> {
        match *self {
            FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
            UnfinishedMetadata {
                unfinished_type,
                unique_type_id,
                metadata_stub,
                member_holding_stub,
                ref member_description_factory,
            } => {
                // Make sure that we have a forward declaration of the type in
                // the TypeMap so that recursive references are possible. This
                // will always be the case if the RecursiveTypeDescription has
                // been properly created through the
                // create_and_register_recursive_type_forward_declaration()
                // function.
                {
                    let type_map = debug_context(cx).type_map.borrow();
                    if type_map.find_metadata_for_unique_id(unique_type_id).is_none() ||
                       type_map.find_metadata_for_type(unfinished_type).is_none() {
                        bug!("Forward declaration of potentially recursive type \
                              '{:?}' was not found in TypeMap!", unfinished_type);
                    }
                }

                // ... then create the member descriptions ...
                let member_descriptions =
                    member_description_factory.create_member_descriptions(cx);

                // ... and attach them to the stub to complete it.
                set_members_of_composite_type(cx,
                                              unfinished_type,
                                              member_holding_stub,
                                              member_descriptions);
                return MetadataCreationResult::new(metadata_stub, true);
            }
        }
    }
}

// Returns from the enclosing function if the type metadata with the given
// unique id can be found in the type map. This guards against a recursive
// `type_metadata` call having created the metadata while we were busy.
macro_rules! return_if_metadata_created_in_meantime {
    ($cx: expr, $unique_type_id: expr) => (
        if let Some(metadata) = debug_context($cx).type_map
            .borrow()
            .find_metadata_for_unique_id($unique_type_id) {
            return MetadataCreationResult::new(metadata, true);
        }
    )
}

// Creates DWARF array-type metadata for fixed-size arrays and slices.
// For `ty::Array` the upper bound is the array length; for anything else
// (slices) the subrange is unbounded (-1).
fn fixed_vec_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    unique_type_id: UniqueTypeId,
    array_or_slice_type: Ty<'tcx>,
    element_type: Ty<'tcx>,
    span: Span,
) -> MetadataCreationResult<'ll> {
    let element_type_metadata = type_metadata(cx, element_type, span);

    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    let (size, align) = cx.size_and_align_of(array_or_slice_type);

    let upper_bound = match array_or_slice_type.sty {
        ty::Array(_, len) => len.unwrap_usize(cx.tcx) as c_longlong,
        _ => -1
    };

    let subrange = unsafe {
        Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound))
    };

    let subscripts = create_DIArray(DIB(cx), &[subrange]);
    let metadata = unsafe {
        llvm::LLVMRustDIBuilderCreateArrayType(
            DIB(cx),
            size.bits(),
            align.bits() as u32,
            element_type_metadata,
            subscripts)
    };

    return MetadataCreationResult::new(metadata, false);
}

// Describes a fat slice pointer (&[T] / &str) as a struct with a
// `data_ptr` and a `length` member.
fn vec_slice_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    slice_ptr_type: Ty<'tcx>,
    element_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> MetadataCreationResult<'ll> {
    let data_ptr_type = cx.tcx.mk_imm_ptr(element_type);

    let data_ptr_metadata = type_metadata(cx, data_ptr_type, span);

    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    let slice_type_name = compute_debuginfo_type_name(cx.tcx, slice_ptr_type, true);

    let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type);
    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize);

    let member_descriptions = vec![
        MemberDescription {
            name: "data_ptr".to_owned(),
            type_metadata: data_ptr_metadata,
            offset: Size::ZERO,
            size: pointer_size,
            align: pointer_align,
            flags: DIFlags::FlagZero,
            discriminant: None,
        },
        MemberDescription {
            name: "length".to_owned(),
            type_metadata: type_metadata(cx, cx.tcx.types.usize, span),
            offset: pointer_size,
            size: usize_size,
            align: usize_align,
            flags: DIFlags::FlagZero,
            discriminant: None,
        },
    ];

    let file_metadata = unknown_file_metadata(cx);

    let metadata = composite_type_metadata(cx,
                                           slice_ptr_type,
                                           &slice_type_name[..],
                                           unique_type_id,
                                           member_descriptions,
                                           NO_SCOPE_METADATA,
                                           file_metadata,
                                           span);
    MetadataCreationResult::new(metadata, false)
}

// Builds DISubroutineType metadata for a function signature: the first entry
// is the return type (None for unit) followed by the argument types.
fn subroutine_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    unique_type_id: UniqueTypeId,
    signature: ty::PolyFnSig<'tcx>,
    span: Span,
) -> MetadataCreationResult<'ll> {
    let signature = cx.tcx.normalize_erasing_late_bound_regions(
        ty::ParamEnv::reveal_all(),
        &signature,
    );

    let signature_metadata: Vec<_> = iter::once(
        // return type
        match signature.output().sty {
            ty::Tuple(ref tys) if tys.is_empty() => None,
            _ => Some(type_metadata(cx, signature.output(), span))
        }
    ).chain(
        // regular arguments
        signature.inputs().iter().map(|argument_type| {
            Some(type_metadata(cx, argument_type, span))
        })
    ).collect();

    return_if_metadata_created_in_meantime!(cx, unique_type_id);

    return MetadataCreationResult::new(
        unsafe {
            llvm::LLVMRustDIBuilderCreateSubroutineType(
                DIB(cx),
                unknown_file_metadata(cx),
                create_DIArray(DIB(cx), &signature_metadata[..]))
        },
        false);
}

// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill-
// defined concept. For the case of an actual trait pointer (i.e., Box<Trait>,
// &Trait), trait_object_type should be the whole thing (e.g, Box<Trait>) and
// trait_type should be the actual trait (e.g., Trait). Where the trait is part
// of a DST struct, there is no trait_object_type and the results of this
// function will be a little bit weird.
fn trait_pointer_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    trait_type: Ty<'tcx>,
    trait_object_type: Option<Ty<'tcx>>,
    unique_type_id: UniqueTypeId,
) -> &'ll DIType {
    // The implementation provided here is a stub. It makes sure that the trait
    // type is assigned the correct name, size, namespace, and source location.
    // But it does not describe the trait's methods.

    let containing_scope = match trait_type.sty {
        ty::Dynamic(ref data, ..) =>
            data.principal_def_id().map(|did| get_namespace_for_item(cx, did)),
        _ => {
            bug!("debuginfo: Unexpected trait-object type in \
                  trait_pointer_metadata(): {:?}",
                 trait_type);
        }
    };

    let trait_object_type = trait_object_type.unwrap_or(trait_type);
    let trait_type_name =
        compute_debuginfo_type_name(cx.tcx, trait_object_type, false);

    let file_metadata = unknown_file_metadata(cx);

    let layout = cx.layout_of(cx.tcx.mk_mut_ptr(trait_type));

    assert_eq!(abi::FAT_PTR_ADDR, 0);
    assert_eq!(abi::FAT_PTR_EXTRA, 1);

    // A trait object is described as a fat pointer: data pointer + vtable.
    let data_ptr_field = layout.field(cx, 0);
    let vtable_field = layout.field(cx, 1);
    let member_descriptions = vec![
        MemberDescription {
            name: "pointer".to_owned(),
            type_metadata: type_metadata(cx,
                cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
                syntax_pos::DUMMY_SP),
            offset: layout.fields.offset(0),
            size: data_ptr_field.size,
            align: data_ptr_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
        },
        MemberDescription {
            name: "vtable".to_owned(),
            type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
            offset: layout.fields.offset(1),
            size: vtable_field.size,
            align: vtable_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
        },
    ];

    composite_type_metadata(cx,
                            trait_object_type,
                            &trait_type_name[..],
                            unique_type_id,
                            member_descriptions,
                            containing_scope,
                            file_metadata,
                            syntax_pos::DUMMY_SP)
}

// Main entry point for computing (and caching) the debuginfo metadata node
// for a type. Results are memoized in the TypeMap by both Ty and
// UniqueTypeId; re-entrant calls triggered while building member types are
// handled via the "created in meantime" checks below.
pub fn type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    t: Ty<'tcx>,
    usage_site_span: Span,
) -> &'ll DIType {
    // Get the unique type id of this type.
    let unique_type_id = {
        let mut type_map = debug_context(cx).type_map.borrow_mut();
        // First, try to find the type in TypeMap. If we have seen it before, we
        // can exit early here.
        match type_map.find_metadata_for_type(t) {
            Some(metadata) => {
                return metadata;
            },
            None => {
                // The Ty is not in the TypeMap but maybe we have already seen
                // an equivalent type (e.g., only differing in region arguments).
                // In order to find out, generate the unique type id and look
                // that up.
                let unique_type_id = type_map.get_unique_type_id_of_type(cx, t);
                match type_map.find_metadata_for_unique_id(unique_type_id) {
                    Some(metadata) => {
                        // There is already an equivalent type in the TypeMap.
                        // Register this Ty as an alias in the cache and
                        // return the cached metadata.
                        type_map.register_type_with_metadata(t, metadata);
                        return metadata;
                    },
                    None => {
                        // There really is no type metadata for this type, so
                        // proceed by creating it.
                        unique_type_id
                    }
                }
            }
        }
    };

    debug!("type_metadata: {:?}", t);

    // Shared helper for raw pointers, references and Box. Returns Err if a
    // recursive call already produced the metadata in the meantime.
    let ptr_metadata = |ty: Ty<'tcx>| {
        match ty.sty {
            ty::Slice(typ) => {
                Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span))
            }
            ty::Str => {
                Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span))
            }
            ty::Dynamic(..) => {
                Ok(MetadataCreationResult::new(
                    trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
                    false))
            }
            _ => {
                let pointee_metadata = type_metadata(cx, ty, usage_site_span);

                if let Some(metadata) = debug_context(cx).type_map
                    .borrow()
                    .find_metadata_for_unique_id(unique_type_id)
                {
                    return Err(metadata);
                }

                Ok(MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata),
                                               false))
            }
        }
    };

    let MetadataCreationResult { metadata, already_stored_in_typemap } = match t.sty {
        ty::Never    |
        ty::Bool     |
        ty::Char     |
        ty::Int(_)   |
        ty::Uint(_)  |
        ty::Float(_) => {
            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
        }
        ty::Tuple(ref elements) if elements.is_empty() => {
            MetadataCreationResult::new(basic_type_metadata(cx, t), false)
        }
        ty::Array(typ, _) |
        ty::Slice(typ) => {
            fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
        }
        ty::Str => {
            fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span)
        }
        ty::Dynamic(..) => {
            MetadataCreationResult::new(
                trait_pointer_metadata(cx, t, None, unique_type_id),
                false)
        }
        ty::Foreign(..) => {
            MetadataCreationResult::new(
                foreign_type_metadata(cx, t, unique_type_id),
                false)
        }
        ty::RawPtr(ty::TypeAndMut{ty, ..}) |
        ty::Ref(_, ty, _) => {
            match ptr_metadata(ty) {
                Ok(res) => res,
                Err(metadata) => return metadata,
            }
        }
        ty::Adt(def, _) if def.is_box() => {
            match ptr_metadata(t.boxed_ty()) {
                Ok(res) => res,
                Err(metadata) => return metadata,
            }
        }
        ty::FnDef(..) | ty::FnPtr(_) => {
            if let Some(metadata) = debug_context(cx).type_map
                .borrow()
                .find_metadata_for_unique_id(unique_type_id)
            {
                return metadata;
            }

            // It's possible to create a self-referential
            // type in Rust by using 'impl trait':
            //
            // fn foo() -> impl Copy { foo }
            //
            // See TypeMap::remove_type for more detals
            // about the workaround
            let temp_type = {
                unsafe {
                    // The choice of type here is pretty arbitrary -
                    // anything reading the debuginfo for a recursive
                    // type is going to see *somthing* weird - the only
                    // question is what exactly it will see
                    let (size, align) = cx.size_and_align_of(t);
                    llvm::LLVMRustDIBuilderCreateBasicType(
                        DIB(cx),
                        SmallCStr::new("<recur_type>").as_ptr(),
                        size.bits(),
                        align.bits() as u32,
                        DW_ATE_unsigned)
                }
            };

            let type_map = &debug_context(cx).type_map;
            type_map.borrow_mut().register_type_with_metadata(t, temp_type);

            let fn_metadata = subroutine_type_metadata(cx,
                                                       unique_type_id,
                                                       t.fn_sig(cx.tcx),
                                                       usage_site_span).metadata;

            type_map.borrow_mut().remove_type(t);

            // This is actually a function pointer, so wrap it in pointer DI
            MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false)
        }
        ty::Closure(def_id, substs) => {
            // Closures are described like tuples of their captured upvars.
            let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx).collect();
            prepare_tuple_metadata(cx,
                                   t,
                                   &upvar_tys,
                                   unique_type_id,
                                   usage_site_span).finalize(cx)
        }
        ty::Generator(def_id, substs, _) => {
            let upvar_tys : Vec<_> = substs.prefix_tys(def_id, cx.tcx).map(|t| {
                cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)
            }).collect();
            prepare_enum_metadata(cx,
                                  t,
                                  def_id,
                                  unique_type_id,
                                  usage_site_span,
                                  upvar_tys).finalize(cx)
        }
        ty::Adt(def, ..) => match def.adt_kind() {
            AdtKind::Struct => {
                prepare_struct_metadata(cx,
                                        t,
                                        unique_type_id,
                                        usage_site_span).finalize(cx)
            }
            AdtKind::Union => {
                prepare_union_metadata(cx,
                                       t,
                                       unique_type_id,
                                       usage_site_span).finalize(cx)
            }
            AdtKind::Enum => {
                prepare_enum_metadata(cx,
                                      t,
                                      def.did,
                                      unique_type_id,
                                      usage_site_span,
                                      vec![]).finalize(cx)
            }
        },
        ty::Tuple(ref elements) => {
            let tys: Vec<_> = elements.iter().map(|k| k.expect_ty()).collect();
            prepare_tuple_metadata(cx,
                                   t,
                                   &tys,
                                   unique_type_id,
                                   usage_site_span).finalize(cx)
        }
        _ => {
            bug!("debuginfo: unexpected type in type_metadata: {:?}", t)
        }
    };

    {
        // Finally, store the result under both keys (with consistency checks
        // in case a recursive call stored it already).
        let mut type_map = debug_context(cx).type_map.borrow_mut();

        if already_stored_in_typemap {
            // Also make sure that we already have a TypeMap entry for the unique type id.
            let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) {
                Some(metadata) => metadata,
                None => {
                    span_bug!(usage_site_span,
                              "Expected type metadata for unique \
                               type id '{}' to already be in \
                               the debuginfo::TypeMap but it \
                               was not. (Ty = {})",
                              type_map.get_unique_type_id_as_string(unique_type_id),
                              t);
                }
            };

            match type_map.find_metadata_for_type(t) {
                Some(metadata) => {
                    if metadata != metadata_for_uid {
                        span_bug!(usage_site_span,
                                  "Mismatch between Ty and \
                                   UniqueTypeId maps in \
                                   debuginfo::TypeMap. \
                                   UniqueTypeId={}, Ty={}",
                                  type_map.get_unique_type_id_as_string(unique_type_id),
                                  t);
                    }
                }
                None => {
                    type_map.register_type_with_metadata(t, metadata);
                }
            }
        } else {
            type_map.register_type_with_metadata(t, metadata);
            type_map.register_unique_id_with_metadata(unique_type_id, metadata);
        }
    }

    metadata
}

// Returns (cached) DIFile metadata for the given source file name.
pub fn file_metadata(cx: &CodegenCx<'ll, '_>,
                     file_name: &FileName,
                     defining_crate: CrateNum) -> &'ll DIFile {
    debug!("file_metadata: file_name: {}, defining_crate: {}",
           file_name,
           defining_crate);

    let file_name = Some(file_name.to_string());
    let directory = if defining_crate == LOCAL_CRATE {
        Some(cx.sess().working_dir.0.to_string_lossy().to_string())
    } else {
        // If the path comes from an upstream crate we assume it has been made
        // independent of the compiler's working directory one way or another.
        None
    };
    file_metadata_raw(cx, file_name, directory)
}

// DIFile placeholder used when no source location is available.
pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
    file_metadata_raw(cx, None, None)
}

// Creates (or returns the cached) DIFile node for the given
// file name / directory pair; None falls back to "<unknown>" / "".
fn file_metadata_raw(cx: &CodegenCx<'ll, '_>,
                     file_name: Option<String>,
                     directory: Option<String>)
                     -> &'ll DIFile {
    let key = (file_name, directory);

    match debug_context(cx).created_files.borrow_mut().entry(key) {
        Entry::Occupied(o) => return o.get(),
        Entry::Vacant(v) => {
            let (file_name, directory) = v.key();
            debug!("file_metadata: file_name: {:?}, directory: {:?}", file_name, directory);

            let file_name = SmallCStr::new(
                if let Some(file_name) = file_name { &file_name } else { "<unknown>" });
            let directory = SmallCStr::new(
                if let Some(directory) = directory { &directory } else { "" });

            let file_metadata = unsafe {
                llvm::LLVMRustDIBuilderCreateFile(DIB(cx),
                                                  file_name.as_ptr(),
                                                  directory.as_ptr())
            };

            v.insert(file_metadata);
            file_metadata
        }
    }
}

// Creates DIBasicType metadata for primitive types (bool, char, ints,
// floats, `!` and `()`), choosing the matching DW_ATE_* encoding.
fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
    debug!("basic_type_metadata: {:?}", t);

    let (name, encoding) = match t.sty {
        ty::Never => ("!", DW_ATE_unsigned),
        ty::Tuple(ref elements) if elements.is_empty() =>
            ("()", DW_ATE_unsigned),
        ty::Bool => ("bool", DW_ATE_boolean),
        ty::Char => ("char", DW_ATE_unsigned_char),
        ty::Int(int_ty) => {
            (int_ty.ty_to_string(), DW_ATE_signed)
        },
        ty::Uint(uint_ty) => {
            (uint_ty.ty_to_string(), DW_ATE_unsigned)
        },
        ty::Float(float_ty) => {
            (float_ty.ty_to_string(), DW_ATE_float)
        },
        _ => bug!("debuginfo::basic_type_metadata - t is invalid type")
    };

    let (size, align) = cx.size_and_align_of(t);
    let name = SmallCStr::new(name);
    let ty_metadata = unsafe {
        llvm::LLVMRustDIBuilderCreateBasicType(
            DIB(cx),
            name.as_ptr(),
            size.bits(),
            align.bits() as u32,
            encoding)
    };

    return ty_metadata;
}

// Foreign (extern) types are opaque; they are described by a struct stub
// with no members.
fn foreign_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    t: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
) -> &'ll DIType {
    debug!("foreign_type_metadata: {:?}", t);

    let name = compute_debuginfo_type_name(cx.tcx, t, false);
    create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA)
}

// Wraps existing pointee metadata in a DIDerivedType pointer node.
fn pointer_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    pointer_type: Ty<'tcx>,
    pointee_type_metadata: &'ll DIType,
) -> &'ll DIType {
    let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type);
    let name = compute_debuginfo_type_name(cx.tcx, pointer_type, false);
    let name = SmallCStr::new(&name);
    unsafe {
        llvm::LLVMRustDIBuilderCreatePointerType(
            DIB(cx),
            pointee_type_metadata,
            pointer_size.bits(),
            pointer_align.bits() as u32,
            name.as_ptr())
    }
}

// Creates the DICompileUnit node for one codegen unit, optionally attaching
// `llvm.gcov` metadata when profiling is enabled.
pub fn compile_unit_metadata(
    tcx: TyCtxt<'_>,
    codegen_unit_name: &str,
    debug_context: &CrateDebugContext<'ll, '_>,
) -> &'ll DIDescriptor {
    let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
        Some(ref path) => path.clone(),
        None => PathBuf::from(&*tcx.crate_name(LOCAL_CRATE).as_str()),
    };

    // The OSX linker has an idiosyncrasy where it will ignore some debuginfo
    // if multiple object files with the same DW_AT_name are linked together.
    // As a workaround we generate unique names for each object file. Those do
    // not correspond to an actual source file but that should be harmless.
    if tcx.sess.target.target.options.is_like_osx {
        name_in_debuginfo.push("@");
        name_in_debuginfo.push(codegen_unit_name);
    }

    debug!("compile_unit_metadata: {:?}", name_in_debuginfo);
    // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
    let producer = format!("clang LLVM (rustc version {})",
                           (option_env!("CFG_VERSION")).expect("CFG_VERSION"));

    let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
    let name_in_debuginfo = SmallCStr::new(&name_in_debuginfo);
    let work_dir = SmallCStr::new(&tcx.sess.working_dir.0.to_string_lossy());
    let producer = CString::new(producer).unwrap();
    let flags = "\0";
    let split_name = "\0";

    // FIXME(#60020):
    //
    //    This should actually be
    //
    //    ```
    //    let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
    //    ```
    //
    //    that is, we should set LLVM's emission kind to `LineTablesOnly` if
    //    we are compiling with "limited" debuginfo. However, some of the
    //    existing tools relied on slightly more debuginfo being generated than
    //    would be the case with `LineTablesOnly`, and we did not want to break
    //    these tools in a "drive-by fix", without a good idea or plan about
    //    what limited debuginfo should exactly look like. So for now we keep
    //    the emission kind as `FullDebug`.
    //
    //    See https://github.com/rust-lang/rust/issues/60020 for details.
    let kind = DebugEmissionKind::FullDebug;
    assert!(tcx.sess.opts.debuginfo != DebugInfo::None);

    unsafe {
        let file_metadata = llvm::LLVMRustDIBuilderCreateFile(
            debug_context.builder, name_in_debuginfo.as_ptr(), work_dir.as_ptr());

        let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
            debug_context.builder,
            DW_LANG_RUST,
            file_metadata,
            producer.as_ptr(),
            tcx.sess.opts.optimize != config::OptLevel::No,
            flags.as_ptr() as *const _,
            0,
            split_name.as_ptr() as *const _,
            kind);

        if tcx.sess.opts.debugging_opts.profile {
            let cu_desc_metadata = llvm::LLVMRustMetadataAsValue(debug_context.llcontext,
                                                                 unit_metadata);
            let gcov_cu_info = [
                path_to_mdstring(debug_context.llcontext,
                                 &tcx.output_filenames(LOCAL_CRATE).with_extension("gcno")),
                path_to_mdstring(debug_context.llcontext,
                                 &tcx.output_filenames(LOCAL_CRATE).with_extension("gcda")),
                cu_desc_metadata,
            ];
            let gcov_metadata = llvm::LLVMMDNodeInContext(debug_context.llcontext,
                                                          gcov_cu_info.as_ptr(),
                                                          gcov_cu_info.len() as c_uint);

            let llvm_gcov_ident = const_cstr!("llvm.gcov");
            llvm::LLVMAddNamedMetadataOperand(debug_context.llmod,
                                              llvm_gcov_ident.as_ptr(),
                                              gcov_metadata);
        }

        return unit_metadata;
    };

    // Helper: wraps a filesystem path in an LLVM metadata string node.
    fn path_to_mdstring(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value {
        let path_str = path_to_c_string(path);
        unsafe {
            llvm::LLVMMDStringInContext(llcx,
                                        path_str.as_ptr(),
                                        path_str.as_bytes().len() as c_uint)
        }
    }
}

// Pair of a created metadata node and a flag telling the caller whether the
// node was already registered in the TypeMap (by `finalize`) or still has
// to be registered.
struct MetadataCreationResult<'ll> {
    metadata: &'ll DIType,
    already_stored_in_typemap: bool
}

impl MetadataCreationResult<'ll> {
    fn new(metadata: &'ll DIType, already_stored_in_typemap: bool) -> Self {
        MetadataCreationResult {
            metadata,
            already_stored_in_typemap,
        }
    }
}

// Description of a type member, which can either be a regular field (as in
// structs or tuples) or an enum variant.
#[derive(Debug)]
struct MemberDescription<'ll> {
    name: String,
    type_metadata: &'ll DIType,
    offset: Size,
    size: Size,
    align: Align,
    flags: DIFlags,
    discriminant: Option<u64>,
}

impl<'ll> MemberDescription<'ll> {
    // Lowers this description into an LLVM variant-member DIType node inside
    // the given composite scope.
    fn into_metadata(self,
                     cx: &CodegenCx<'ll, '_>,
                     composite_type_metadata: &'ll DIScope) -> &'ll DIType {
        let member_name = CString::new(self.name).unwrap();
        unsafe {
            llvm::LLVMRustDIBuilderCreateVariantMemberType(
                DIB(cx),
                composite_type_metadata,
                member_name.as_ptr(),
                unknown_file_metadata(cx),
                UNKNOWN_LINE_NUMBER,
                self.size.bits(),
                self.align.bits() as u32,
                self.offset.bits(),
                match self.discriminant {
                    None => None,
                    Some(value) => Some(cx.const_u64(value)),
                },
                self.flags,
                self.type_metadata)
        }
    }
}

// A factory for MemberDescriptions. It produces a list of member descriptions
// for some record-like type. MemberDescriptionFactories are used to defer the
// creation of type member descriptions in order to break cycles arising from
// recursive type definitions.
enum MemberDescriptionFactory<'ll, 'tcx> {
    StructMDF(StructMemberDescriptionFactory<'tcx>),
    TupleMDF(TupleMemberDescriptionFactory<'tcx>),
    EnumMDF(EnumMemberDescriptionFactory<'ll, 'tcx>),
    UnionMDF(UnionMemberDescriptionFactory<'tcx>),
    VariantMDF(VariantMemberDescriptionFactory<'ll, 'tcx>)
}

impl MemberDescriptionFactory<'ll, 'tcx> {
    // Dispatches to the concrete factory variant.
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        match *self {
            StructMDF(ref this) => {
                this.create_member_descriptions(cx)
            }
            TupleMDF(ref this) => {
                this.create_member_descriptions(cx)
            }
            EnumMDF(ref this) => {
                this.create_member_descriptions(cx)
            }
            UnionMDF(ref this) => {
                this.create_member_descriptions(cx)
            }
            VariantMDF(ref this) => {
                this.create_member_descriptions(cx)
            }
        }
    }
}

//=-----------------------------------------------------------------------------
// Structs
//=-----------------------------------------------------------------------------

// Creates MemberDescriptions for the fields of a struct
struct StructMemberDescriptionFactory<'tcx> {
    ty: Ty<'tcx>,
    variant: &'tcx ty::VariantDef,
    span: Span,
}

impl<'tcx> StructMemberDescriptionFactory<'tcx> {
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        let layout = cx.layout_of(self.ty);
        self.variant.fields.iter().enumerate().map(|(i, f)| {
            // Tuple-structs get synthetic "__0", "__1", ... field names.
            let name = if self.variant.ctor_kind == CtorKind::Fn {
                format!("__{}", i)
            } else {
                f.ident.to_string()
            };
            let field = layout.field(cx, i);
            MemberDescription {
                name,
                type_metadata: type_metadata(cx, field.ty, self.span),
                offset: layout.fields.offset(i),
                size: field.size,
                align: field.align.abi,
                flags: DIFlags::FlagZero,
                discriminant: None,
            }
        }).collect()
    }
}

// Creates the (recursive-type-aware) forward declaration for a struct type.
fn prepare_struct_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    struct_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let struct_name = compute_debuginfo_type_name(cx.tcx, struct_type, false);

    let (struct_def_id, variant) = match struct_type.sty {
        ty::Adt(def, _) => (def.did, def.non_enum_variant()),
        _ => bug!("prepare_struct_metadata on a non-ADT")
    };

    let containing_scope = get_namespace_for_item(cx, struct_def_id);

    let struct_metadata_stub = create_struct_stub(cx,
                                                  struct_type,
                                                  &struct_name,
                                                  unique_type_id,
                                                  Some(containing_scope));

    create_and_register_recursive_type_forward_declaration(
        cx,
        struct_type,
        unique_type_id,
        struct_metadata_stub,
        struct_metadata_stub,
        StructMDF(StructMemberDescriptionFactory {
            ty: struct_type,
            variant,
            span,
        })
    )
}

//=-----------------------------------------------------------------------------
// Tuples
//=-----------------------------------------------------------------------------

// Creates MemberDescriptions for the fields of a tuple
struct TupleMemberDescriptionFactory<'tcx> {
    ty: Ty<'tcx>,
    component_types: Vec<Ty<'tcx>>,
    span: Span,
}

impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        let layout = cx.layout_of(self.ty);
        self.component_types.iter().enumerate().map(|(i, &component_type)| {
            let (size, align) = cx.size_and_align_of(component_type);
            MemberDescription {
                name: format!("__{}", i),
                type_metadata: type_metadata(cx, component_type, self.span),
                offset: layout.fields.offset(i),
                size,
                align,
                flags: DIFlags::FlagZero,
                discriminant: None,
            }
        }).collect()
    }
}

// Creates the (recursive-type-aware) forward declaration for a tuple type.
// Also used for closures, whose upvars are described like tuple fields.
fn prepare_tuple_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    tuple_type: Ty<'tcx>,
    component_types: &[Ty<'tcx>],
    unique_type_id: UniqueTypeId,
    span: Span,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let tuple_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);

    let struct_stub = create_struct_stub(cx,
                                         tuple_type,
                                         &tuple_name[..],
                                         unique_type_id,
                                         NO_SCOPE_METADATA);

    create_and_register_recursive_type_forward_declaration(
        cx,
        tuple_type,
        unique_type_id,
        struct_stub,
        struct_stub,
        TupleMDF(TupleMemberDescriptionFactory {
            ty: tuple_type,
            component_types: component_types.to_vec(),
            span,
        })
    )
}

//=-----------------------------------------------------------------------------
// Unions
//=-----------------------------------------------------------------------------

// Creates MemberDescriptions for the fields of a union; all members share
// offset zero.
struct UnionMemberDescriptionFactory<'tcx> {
    layout: TyLayout<'tcx>,
    variant: &'tcx ty::VariantDef,
    span: Span,
}

impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        self.variant.fields.iter().enumerate().map(|(i, f)| {
            let field = self.layout.field(cx, i);
            MemberDescription {
                name: f.ident.to_string(),
                type_metadata: type_metadata(cx, field.ty, self.span),
                offset: Size::ZERO,
                size: field.size,
                align: field.align.abi,
                flags: DIFlags::FlagZero,
                discriminant: None,
            }
        }).collect()
    }
}

// Creates the (recursive-type-aware) forward declaration for a union type.
fn prepare_union_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    union_type: Ty<'tcx>,
    unique_type_id: UniqueTypeId,
    span: Span,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let union_name = compute_debuginfo_type_name(cx.tcx, union_type, false);

    let (union_def_id, variant) = match union_type.sty {
        ty::Adt(def, _) => (def.did, def.non_enum_variant()),
        _ => bug!("prepare_union_metadata on a non-ADT")
    };

    let containing_scope = get_namespace_for_item(cx, union_def_id);

    let union_metadata_stub = create_union_stub(cx,
                                                union_type,
                                                &union_name,
                                                unique_type_id,
                                                containing_scope);

    create_and_register_recursive_type_forward_declaration(
        cx,
        union_type,
        unique_type_id,
        union_metadata_stub,
        union_metadata_stub,
        UnionMDF(UnionMemberDescriptionFactory {
            layout: cx.layout_of(union_type),
            variant,
            span,
        })
    )
}

//=-----------------------------------------------------------------------------
// Enums
//=-----------------------------------------------------------------------------

// DWARF variant support is only available starting in LLVM 8.
// Although the earlier enum debug info output did not work properly
// in all situations, it is better for the time being to continue to
// sometimes emit the old style rather than emit something completely
// useless when rust is compiled against LLVM 6 or older. LLVM 7
// contains an early version of the DWARF variant support, and will
// crash when handling the new debug info format. This function
// decides which representation will be emitted.
fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool {
    // On MSVC we have to use the fallback mode, because LLVM doesn't
    // lower variant parts to PDB.
    return cx.sess().target.target.options.is_like_msvc
        // LLVM version 7 did not release with an important bug fix;
        // but the required patch is in the LLVM 8.  Rust LLVM reports
        // 8 as well.
        || llvm_util::get_major_version() < 8;
}

// Describes the members of an enum value: An enum is described as a union of
// structs in DWARF. This MemberDescriptionFactory provides the description for
// the members of this union; so for every variant of the given enum, this
// factory will produce one MemberDescription (all with no name and a fixed
// offset of zero bytes).
struct EnumMemberDescriptionFactory<'ll, 'tcx> {
    enum_type: Ty<'tcx>,
    layout: TyLayout<'tcx>,
    // Only populated for the fallback (pre-LLVM-8) encoding; `None` otherwise.
    discriminant_type_metadata: Option<&'ll DIType>,
    containing_scope: &'ll DIScope,
    span: Span,
}

impl EnumMemberDescriptionFactory<'ll, 'tcx> {
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        // Resolves a variant index to its info for both ADT enums and
        // generators (generator states are layouted like enum variants).
        let variant_info_for = |index: VariantIdx| {
            match &self.enum_type.sty {
                ty::Adt(adt, _) => VariantInfo::Adt(&adt.variants[index]),
                ty::Generator(def_id, substs, _) => {
                    let generator_layout = cx.tcx.generator_layout(*def_id);
                    VariantInfo::Generator(*substs, generator_layout, index)
                }
                _ => bug!(),
            }
        };

        // This will always find the metadata in the type map.
        let fallback = use_enum_fallback(cx);
        let self_metadata = if fallback {
            self.containing_scope
        } else {
            type_metadata(cx, self.enum_type, self.span)
        };

        match self.layout.variants {
            layout::Variants::Single { index } => {
                // Uninhabited enums have no variants to describe.
                if let ty::Adt(adt, _) = &self.enum_type.sty {
                    if adt.variants.is_empty() {
                        return vec![];
                    }
                }

                let variant_info = variant_info_for(index);
                let (variant_type_metadata, member_description_factory) =
                    describe_enum_variant(cx,
                                          self.layout,
                                          variant_info,
                                          NoDiscriminant,
                                          self_metadata,
                                          self.span);

                let member_descriptions =
                    member_description_factory.create_member_descriptions(cx);

                set_members_of_composite_type(cx,
                                              self.enum_type,
                                              variant_type_metadata,
                                              member_descriptions);
                vec![
                    MemberDescription {
                        name: if fallback {
                            String::new()
                        } else {
                            variant_info.variant_name()
                        },
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: self.layout.size,
                        align: self.layout.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: None,
                    }
                ]
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Tag,
                discr_index,
                ref variants,
                ..
            } => {
                let discriminant_info = if fallback {
                    RegularDiscriminant {
                        discr_field: Field::from(discr_index),
                        discr_type_metadata: self.discriminant_type_metadata.unwrap()
                    }
                } else {
                    // This doesn't matter in this case.
                    NoDiscriminant
                };
                variants.iter_enumerated().map(|(i, _)| {
                    let variant = self.layout.for_variant(cx, i);
                    let variant_info = variant_info_for(i);
                    let (variant_type_metadata, member_desc_factory) =
                        describe_enum_variant(cx,
                                              variant,
                                              variant_info,
                                              discriminant_info,
                                              self_metadata,
                                              self.span);

                    let member_descriptions = member_desc_factory
                        .create_member_descriptions(cx);

                    set_members_of_composite_type(cx,
                                                  self.enum_type,
                                                  variant_type_metadata,
                                                  member_descriptions);

                    MemberDescription {
                        name: if fallback {
                            String::new()
                        } else {
                            variant_info.variant_name()
                        },
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: self.layout.size,
                        align: self.layout.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: Some(
                            self.layout.ty.discriminant_for_variant(cx.tcx, i).unwrap().val as u64
                        ),
                    }
                }).collect()
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Niche {
                    ref niche_variants,
                    niche_start,
                    dataful_variant,
                },
                ref discr,
                ref variants,
                discr_index,
            } => {
                if fallback {
                    let variant = self.layout.for_variant(cx, dataful_variant);
                    // Create a description of the non-null variant
                    let (variant_type_metadata, member_description_factory) =
                        describe_enum_variant(cx,
                                              variant,
                                              variant_info_for(dataful_variant),
                                              OptimizedDiscriminant,
                                              self.containing_scope,
                                              self.span);

                    let variant_member_descriptions =
                        member_description_factory.create_member_descriptions(cx);

                    set_members_of_composite_type(cx,
                                                  self.enum_type,
                                                  variant_type_metadata,
                                                  variant_member_descriptions);

                    // Encode the information about the null variant in the union
                    // member's name.
                    let mut name = String::from("RUST$ENCODED$ENUM$");
                    // Right now it's not even going to work for `niche_start > 0`,
                    // and for multiple niche variants it only supports the first.
                    fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                                    name: &mut String,
                                                    layout: TyLayout<'tcx>,
                                                    offset: Size,
                                                    size: Size) {
                        for i in 0..layout.fields.count() {
                            let field_offset = layout.fields.offset(i);
                            if field_offset > offset {
                                continue;
                            }
                            let inner_offset = offset - field_offset;
                            let field = layout.field(cx, i);
                            if inner_offset + size <= field.size {
                                write!(name, "{}$", i).unwrap();
                                compute_field_path(cx, name, field, inner_offset, size);
                            }
                        }
                    }
                    compute_field_path(cx, &mut name,
                                       self.layout,
                                       self.layout.fields.offset(discr_index),
                                       self.layout.field(cx, discr_index).size);
                    variant_info_for(*niche_variants.start()).map_struct_name(|variant_name| {
                        name.push_str(variant_name);
                    });

                    // Create the (singleton) list of descriptions of union members.
                    vec![
                        MemberDescription {
                            name,
                            type_metadata: variant_type_metadata,
                            offset: Size::ZERO,
                            size: variant.size,
                            align: variant.align.abi,
                            flags: DIFlags::FlagZero,
                            discriminant: None,
                        }
                    ]
                } else {
                    variants.iter_enumerated().map(|(i, _)| {
                        let variant = self.layout.for_variant(cx, i);
                        let variant_info = variant_info_for(i);
                        let (variant_type_metadata, member_desc_factory) =
                            describe_enum_variant(cx,
                                                  variant,
                                                  variant_info,
                                                  OptimizedDiscriminant,
                                                  self_metadata,
                                                  self.span);

                        let member_descriptions = member_desc_factory
                            .create_member_descriptions(cx);

                        set_members_of_composite_type(cx,
                                                      self.enum_type,
                                                      variant_type_metadata,
                                                      member_descriptions);

                        let niche_value = if i == dataful_variant {
                            None
                        } else {
                            // Map the variant index back into the niche's value
                            // range using the same wrapping arithmetic layout uses.
                            let value = (i.as_u32() as u128)
                                .wrapping_sub(niche_variants.start().as_u32() as u128)
                                .wrapping_add(niche_start);
                            let value = truncate(value, discr.value.size(cx));
                            // NOTE(eddyb) do *NOT* remove this assert, until
                            // we pass the full 128-bit value to LLVM, otherwise
                            // truncation will be silent and remain undetected.
                            assert_eq!(value as u64 as u128, value);
                            Some(value as u64)
                        };

                        MemberDescription {
                            name: variant_info.variant_name(),
                            type_metadata: variant_type_metadata,
                            offset: Size::ZERO,
                            size: self.layout.size,
                            align: self.layout.align.abi,
                            flags: DIFlags::FlagZero,
                            discriminant: niche_value,
                        }
                    }).collect()
                }
            }
        }
    }
}

// Creates MemberDescriptions for the fields of a single enum variant.
struct VariantMemberDescriptionFactory<'ll, 'tcx> {
    // Cloned from the layout::Struct describing the variant.
    offsets: Vec<layout::Size>,
    args: Vec<(String, Ty<'tcx>)>,
    discriminant_type_metadata: Option<&'ll DIType>,
    span: Span,
}

impl VariantMemberDescriptionFactory<'ll, 'tcx> {
    fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                  -> Vec<MemberDescription<'ll>> {
        self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
            let (size, align) = cx.size_and_align_of(ty);
            MemberDescription {
                name: name.to_string(),
                type_metadata: if use_enum_fallback(cx) {
                    match self.discriminant_type_metadata {
                        // Discriminant is always the first field of our variant
                        // when using the enum fallback.
Some(metadata) if i == 0 => metadata,
                        _ => type_metadata(cx, ty, self.span)
                    }
                } else {
                    type_metadata(cx, ty, self.span)
                },
                offset: self.offsets[i],
                size,
                align,
                flags: DIFlags::FlagZero,
                discriminant: None,
            }
        }).collect()
    }
}

#[derive(Copy, Clone)]
enum EnumDiscriminantInfo<'ll> {
    RegularDiscriminant{ discr_field: Field, discr_type_metadata: &'ll DIType },
    OptimizedDiscriminant,
    NoDiscriminant
}

// Unified view over the two kinds of "variant-bearing" types this module
// describes: ordinary ADT enums and generator state machines.
#[derive(Copy, Clone)]
enum VariantInfo<'tcx> {
    Adt(&'tcx ty::VariantDef),
    Generator(ty::GeneratorSubsts<'tcx>, &'tcx GeneratorLayout<'tcx>, VariantIdx),
}

impl<'tcx> VariantInfo<'tcx> {
    // Applies `f` to the variant's struct name without allocating a String.
    fn map_struct_name<R>(&self, f: impl FnOnce(&str) -> R) -> R {
        match self {
            VariantInfo::Adt(variant) => f(&variant.ident.as_str()),
            VariantInfo::Generator(substs, _, variant_index) =>
                f(&substs.variant_name(*variant_index)),
        }
    }

    fn variant_name(&self) -> String {
        match self {
            VariantInfo::Adt(variant) => variant.ident.to_string(),
            VariantInfo::Generator(_, _, variant_index) => {
                // Since GDB currently prints out the raw discriminant along
                // with every variant, make each variant name be just the value
                // of the discriminant. The struct name for the variant includes
                // the actual variant description.
                format!("{}", variant_index.as_usize())
            }
        }
    }

    fn field_name(&self, i: usize) -> String {
        let field_name = match self {
            VariantInfo::Adt(variant) if variant.ctor_kind != CtorKind::Fn =>
                Some(variant.fields[i].ident.to_string()),
            VariantInfo::Generator(_, generator_layout, variant_index) => {
                let field = generator_layout.variant_fields[*variant_index][i.into()];
                let decl = &generator_layout.__local_debuginfo_codegen_only_do_not_use[field];
                decl.name.map(|name| name.to_string())
            }
            _ => None,
        };
        // Tuple-like variants and unnamed generator fields fall back to a
        // positional `__N` name.
        field_name.unwrap_or_else(|| format!("__{}", i))
    }
}

// Returns a tuple of (1) type_metadata_stub of the variant, (2) a
// MemberDescriptionFactory for producing the descriptions of the
// fields of the variant. This is a rudimentary version of a full
// RecursiveTypeDescription.
fn describe_enum_variant(
    cx: &CodegenCx<'ll, 'tcx>,
    layout: layout::TyLayout<'tcx>,
    variant: VariantInfo<'tcx>,
    discriminant_info: EnumDiscriminantInfo<'ll>,
    containing_scope: &'ll DIScope,
    span: Span,
) -> (&'ll DICompositeType, MemberDescriptionFactory<'ll, 'tcx>) {
    let metadata_stub = variant.map_struct_name(|variant_name| {
        let unique_type_id = debug_context(cx).type_map
                                              .borrow_mut()
                                              .get_unique_type_id_of_enum_variant(
                                                  cx,
                                                  layout.ty,
                                                  &variant_name);
        create_struct_stub(cx,
                           layout.ty,
                           &variant_name,
                           unique_type_id,
                           Some(containing_scope))
    });

    // Build an array of (field name, field type) pairs to be captured in the factory closure.
    let (offsets, args) = if use_enum_fallback(cx) {
        // If this is not a univariant enum, there is also the discriminant field.
        let (discr_offset, discr_arg) = match discriminant_info {
            RegularDiscriminant { discr_field, .. } => {
                // We have the layout of an enum variant, we need the layout of the outer enum
                let enum_layout = cx.layout_of(layout.ty);
                let offset = enum_layout.fields.offset(discr_field.as_usize());
                // "RUST$ENUM$DISR" is the magic name debuggers recognize for
                // the discriminant member in the fallback encoding.
                let args = (
                    "RUST$ENUM$DISR".to_owned(),
                    enum_layout.field(cx, discr_field.as_usize()).ty);
                (Some(offset), Some(args))
            }
            _ => (None, None),
        };
        (
            discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| {
                layout.fields.offset(i)
            })).collect(),
            discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| {
                (variant.field_name(i), layout.field(cx, i).ty)
            })).collect()
        )
    } else {
        (
            (0..layout.fields.count()).map(|i| {
                layout.fields.offset(i)
            }).collect(),
            (0..layout.fields.count()).map(|i| {
                (variant.field_name(i), layout.field(cx, i).ty)
            }).collect()
        )
    };

    let member_description_factory =
        VariantMDF(VariantMemberDescriptionFactory {
            offsets,
            args,
            discriminant_type_metadata: match discriminant_info {
                RegularDiscriminant { discr_type_metadata, ..
} => {
                    Some(discr_type_metadata)
                }
                _ => None
            },
            span,
        });

    (metadata_stub, member_description_factory)
}

/// Prepares debuginfo for an enum (or generator): either the legacy
/// union-of-structs encoding (`use_enum_fallback`) or the DWARF variant-part
/// encoding available from LLVM 8 onwards.
fn prepare_enum_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    enum_type: Ty<'tcx>,
    enum_def_id: DefId,
    unique_type_id: UniqueTypeId,
    span: Span,
    outer_field_tys: Vec<Ty<'tcx>>,
) -> RecursiveTypeDescription<'ll, 'tcx> {
    let enum_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);

    let containing_scope = get_namespace_for_item(cx, enum_def_id);
    // FIXME: This should emit actual file metadata for the enum, but we
    // currently can't get the necessary information when it comes to types
    // imported from other crates. Formerly we violated the ODR when performing
    // LTO because we emitted debuginfo for the same type with varying file
    // metadata, so as a workaround we pretend that the type comes from
    // <unknown>
    let file_metadata = unknown_file_metadata(cx);

    // Lazily builds (and caches per (DefId, Primitive)) the DW_TAG_enumeration
    // metadata for the discriminant integer itself.
    let discriminant_type_metadata = |discr: layout::Primitive| {
        let enumerators_metadata: Vec<_> = match enum_type.sty {
            ty::Adt(def, _) => def
                .discriminants(cx.tcx)
                .zip(&def.variants)
                .map(|((_, discr), v)| {
                    let name = SmallCStr::new(&v.ident.as_str());
                    unsafe {
                        Some(llvm::LLVMRustDIBuilderCreateEnumerator(
                            DIB(cx),
                            name.as_ptr(),
                            // FIXME: what if enumeration has i128 discriminant?
                            discr.val as u64))
                    }
                })
                .collect(),
            ty::Generator(_, substs, _) => substs
                .variant_range(enum_def_id, cx.tcx)
                .map(|variant_index| {
                    let name = SmallCStr::new(&substs.variant_name(variant_index));
                    unsafe {
                        Some(llvm::LLVMRustDIBuilderCreateEnumerator(
                            DIB(cx),
                            name.as_ptr(),
                            // FIXME: what if enumeration has i128 discriminant?
                            variant_index.as_usize() as u64))
                    }
                })
                .collect(),
            _ => bug!(),
        };

        let disr_type_key = (enum_def_id, discr);
        let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
                                                                 .borrow()
                                                                 .get(&disr_type_key).cloned();
        match cached_discriminant_type_metadata {
            Some(discriminant_type_metadata) => discriminant_type_metadata,
            None => {
                let (discriminant_size, discriminant_align) =
                    (discr.size(cx), discr.align(cx));
                let discriminant_base_type_metadata =
                    type_metadata(cx, discr.to_ty(cx.tcx), syntax_pos::DUMMY_SP);

                let discriminant_name = match enum_type.sty {
                    ty::Adt(..) => SmallCStr::new(&cx.tcx.item_name(enum_def_id).as_str()),
                    ty::Generator(..) => SmallCStr::new(&enum_name),
                    _ => bug!(),
                };

                let discriminant_type_metadata = unsafe {
                    llvm::LLVMRustDIBuilderCreateEnumerationType(
                        DIB(cx),
                        containing_scope,
                        discriminant_name.as_ptr(),
                        file_metadata,
                        UNKNOWN_LINE_NUMBER,
                        discriminant_size.bits(),
                        discriminant_align.abi.bits() as u32,
                        create_DIArray(DIB(cx), &enumerators_metadata),
                        discriminant_base_type_metadata, true)
                };

                debug_context(cx).created_enum_disr_types
                                 .borrow_mut()
                                 .insert(disr_type_key, discriminant_type_metadata);

                discriminant_type_metadata
            }
        }
    };

    let layout = cx.layout_of(enum_type);

    // C-like enums lower to a bare scalar: just emit the enumeration type.
    match (&layout.abi, &layout.variants) {
        (&layout::Abi::Scalar(_), &layout::Variants::Multiple {
            discr_kind: layout::DiscriminantKind::Tag,
            ref discr,
            ..
        }) => return FinalMetadata(discriminant_type_metadata(discr.value)),
        _ => {}
    }

    let enum_name = SmallCStr::new(&enum_name);
    let unique_type_id_str = SmallCStr::new(
        debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
    );

    if use_enum_fallback(cx) {
        let discriminant_type_metadata = match layout.variants {
            layout::Variants::Single { .. } |
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Niche { .. },
                ..
            } => None,
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Tag,
                ref discr,
                ..
            } => {
                Some(discriminant_type_metadata(discr.value))
            }
        };

        // Fallback encoding: the enum is a DWARF union of per-variant structs.
        let enum_metadata = unsafe {
            llvm::LLVMRustDIBuilderCreateUnionType(
                DIB(cx),
                containing_scope,
                enum_name.as_ptr(),
                file_metadata,
                UNKNOWN_LINE_NUMBER,
                layout.size.bits(),
                layout.align.abi.bits() as u32,
                DIFlags::FlagZero,
                None,
                0, // RuntimeLang
                unique_type_id_str.as_ptr())
        };

        return create_and_register_recursive_type_forward_declaration(
            cx,
            enum_type,
            unique_type_id,
            enum_metadata,
            enum_metadata,
            EnumMDF(EnumMemberDescriptionFactory {
                enum_type,
                layout,
                discriminant_type_metadata,
                containing_scope,
                span,
            }),
        );
    }

    let discriminator_name = match &enum_type.sty {
        ty::Generator(..) => Some(SmallCStr::new(&"__state")),
        _ => None,
    };
    let discriminator_name = discriminator_name.map(|n| n.as_ptr()).unwrap_or(ptr::null_mut());
    let discriminator_metadata = match layout.variants {
        // A single-variant enum has no discriminant.
        layout::Variants::Single { .. } => None,

        layout::Variants::Multiple {
            discr_kind: layout::DiscriminantKind::Niche { .. },
            ref discr,
            discr_index,
            ..
        } => {
            // Find the integer type of the correct size.
            let size = discr.value.size(cx);
            let align = discr.value.align(cx);

            let discr_type = match discr.value {
                layout::Int(t, _) => t,
                layout::Float(layout::FloatTy::F32) => Integer::I32,
                layout::Float(layout::FloatTy::F64) => Integer::I64,
                layout::Pointer => cx.data_layout().ptr_sized_integer(),
            }.to_ty(cx.tcx, false);

            let discr_metadata = basic_type_metadata(cx, discr_type);
            unsafe {
                Some(llvm::LLVMRustDIBuilderCreateMemberType(
                    DIB(cx),
                    containing_scope,
                    discriminator_name,
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
                    align.abi.bits() as u32,
                    layout.fields.offset(discr_index).bits(),
                    DIFlags::FlagArtificial,
                    discr_metadata))
            }
        },

        layout::Variants::Multiple {
            discr_kind: layout::DiscriminantKind::Tag,
            ref discr,
            discr_index,
            ..
        } => {
            let discr_type = discr.value.to_ty(cx.tcx);
            let (size, align) = cx.size_and_align_of(discr_type);

            let discr_metadata = basic_type_metadata(cx, discr_type);
            unsafe {
                Some(llvm::LLVMRustDIBuilderCreateMemberType(
                    DIB(cx),
                    containing_scope,
                    discriminator_name,
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
                    align.bits() as u32,
                    layout.fields.offset(discr_index).bits(),
                    DIFlags::FlagArtificial,
                    discr_metadata))
            }
        },
    };

    let mut outer_fields = match layout.variants {
        layout::Variants::Single { .. } => vec![],
        layout::Variants::Multiple { .. } => {
            // Fields shared by all variants (e.g. a generator's saved locals),
            // described through the tuple factory since they are positional.
            let tuple_mdf = TupleMemberDescriptionFactory {
                ty: enum_type,
                component_types: outer_field_tys,
                span
            };
            tuple_mdf
                .create_member_descriptions(cx)
                .into_iter()
                .map(|desc| Some(desc.into_metadata(cx, containing_scope)))
                .collect()
        }
    };

    let variant_part_unique_type_id_str = SmallCStr::new(
        debug_context(cx).type_map
            .borrow_mut()
            .get_unique_type_id_str_of_enum_variant_part(unique_type_id)
    );
    let empty_array = create_DIArray(DIB(cx), &[]);
    let variant_part = unsafe {
        llvm::LLVMRustDIBuilderCreateVariantPart(
            DIB(cx),
            containing_scope,
            ptr::null_mut(),
            file_metadata,
            UNKNOWN_LINE_NUMBER,
            layout.size.bits(),
            layout.align.abi.bits() as u32,
            DIFlags::FlagZero,
            discriminator_metadata,
            empty_array,
            variant_part_unique_type_id_str.as_ptr())
    };
    outer_fields.push(Some(variant_part));

    // The variant part must be wrapped in a struct according to DWARF.
let type_array = create_DIArray(DIB(cx), &outer_fields);
    let struct_wrapper = unsafe {
        llvm::LLVMRustDIBuilderCreateStructType(
            DIB(cx),
            Some(containing_scope),
            enum_name.as_ptr(),
            file_metadata,
            UNKNOWN_LINE_NUMBER,
            layout.size.bits(),
            layout.align.abi.bits() as u32,
            DIFlags::FlagZero,
            None,
            type_array,
            0,
            None,
            unique_type_id_str.as_ptr())
    };

    return create_and_register_recursive_type_forward_declaration(
        cx,
        enum_type,
        unique_type_id,
        struct_wrapper,
        variant_part,
        EnumMDF(EnumMemberDescriptionFactory {
            enum_type,
            layout,
            discriminant_type_metadata: None,
            containing_scope,
            span,
        }),
    );
}

/// Creates debug information for a composite type, that is, anything that
/// results in a LLVM struct.
///
/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
fn composite_type_metadata(
    cx: &CodegenCx<'ll, 'tcx>,
    composite_type: Ty<'tcx>,
    composite_type_name: &str,
    composite_type_unique_id: UniqueTypeId,
    member_descriptions: Vec<MemberDescription<'ll>>,
    containing_scope: Option<&'ll DIScope>,

    // Ignore source location information as long as it
    // can't be reconstructed for non-local crates.
    _file_metadata: &'ll DIFile,
    _definition_span: Span,
) -> &'ll DICompositeType {
    // Create the (empty) struct metadata node ...
    let composite_type_metadata = create_struct_stub(cx,
                                                     composite_type,
                                                     composite_type_name,
                                                     composite_type_unique_id,
                                                     containing_scope);
    // ... and immediately create and add the member descriptions.
    set_members_of_composite_type(cx,
                                  composite_type,
                                  composite_type_metadata,
                                  member_descriptions);

    composite_type_metadata
}

// Fills a previously created forward declaration (stub) with its members.
// Must be called exactly once per stub — see the guard below.
fn set_members_of_composite_type(cx: &CodegenCx<'ll, 'tcx>,
                                 composite_type: Ty<'tcx>,
                                 composite_type_metadata: &'ll DICompositeType,
                                 member_descriptions: Vec<MemberDescription<'ll>>) {
    // In some rare cases LLVM metadata uniquing would lead to an existing type
    // description being used instead of a new one created in
    // create_struct_stub. This would cause a hard to trace assertion in
    // DICompositeType::SetTypeArray(). The following check makes sure that we
    // get a better error message if this should happen again due to some
    // regression.
    {
        let mut composite_types_completed =
            debug_context(cx).composite_types_completed.borrow_mut();
        if composite_types_completed.contains(&composite_type_metadata) {
            bug!("debuginfo::set_members_of_composite_type() - \
                  Already completed forward declaration re-encountered.");
        } else {
            composite_types_completed.insert(composite_type_metadata);
        }
    }

    let member_metadata: Vec<_> = member_descriptions
        .into_iter()
        .map(|desc| Some(desc.into_metadata(cx, composite_type_metadata)))
        .collect();

    let type_params = compute_type_parameters(cx, composite_type);
    unsafe {
        let type_array = create_DIArray(DIB(cx), &member_metadata[..]);
        llvm::LLVMRustDICompositeTypeReplaceArrays(
            DIB(cx), composite_type_metadata, Some(type_array), type_params);
    }
}

// Compute the type parameters for a type, if any, for the given
// metadata.
fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&'ll DIArray> { if let ty::Adt(def, substs) = ty.sty { if !substs.types().next().is_none() { let generics = cx.tcx.generics_of(def.did); let names = get_parameter_names(cx, generics); let template_params: Vec<_> = substs.iter().zip(names).filter_map(|(kind, name)| { if let UnpackedKind::Type(ty) = kind.unpack() { let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); let name = SmallCStr::new(&name.as_str()); Some(unsafe { Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( DIB(cx), None, name.as_ptr(), actual_type_metadata, unknown_file_metadata(cx), 0, 0, )) }) } else { None } }).collect(); return Some(create_DIArray(DIB(cx), &template_params[..])); } } return Some(create_DIArray(DIB(cx), &[])); fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<InternedString> { let mut names = generics.parent.map_or(vec![], |def_id| { get_parameter_names(cx, cx.tcx.generics_of(def_id)) }); names.extend(generics.params.iter().map(|param| param.name)); names } } // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). fn create_struct_stub( cx: &CodegenCx<'ll, 'tcx>, struct_type: Ty<'tcx>, struct_type_name: &str, unique_type_id: UniqueTypeId, containing_scope: Option<&'ll DIScope>, ) -> &'ll DICompositeType { let (struct_size, struct_align) = cx.size_and_align_of(struct_type); let name = SmallCStr::new(struct_type_name); let unique_type_id = SmallCStr::new( debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) ); let metadata_stub = unsafe { // LLVMRustDIBuilderCreateStructType() wants an empty array. 
A null // pointer will lead to hard to trace and debug LLVM assertions // later on in llvm/lib/IR/Value.cpp. let empty_array = create_DIArray(DIB(cx), &[]); llvm::LLVMRustDIBuilderCreateStructType( DIB(cx), containing_scope, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, struct_size.bits(), struct_align.bits() as u32, DIFlags::FlagZero, None, empty_array, 0, None, unique_type_id.as_ptr()) }; metadata_stub } fn create_union_stub( cx: &CodegenCx<'ll, 'tcx>, union_type: Ty<'tcx>, union_type_name: &str, unique_type_id: UniqueTypeId, containing_scope: &'ll DIScope, ) -> &'ll DICompositeType { let (union_size, union_align) = cx.size_and_align_of(union_type); let name = SmallCStr::new(union_type_name); let unique_type_id = SmallCStr::new( debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) ); let metadata_stub = unsafe { // LLVMRustDIBuilderCreateUnionType() wants an empty array. A null // pointer will lead to hard to trace and debug LLVM assertions // later on in llvm/lib/IR/Value.cpp. let empty_array = create_DIArray(DIB(cx), &[]); llvm::LLVMRustDIBuilderCreateUnionType( DIB(cx), containing_scope, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, union_size.bits(), union_align.bits() as u32, DIFlags::FlagZero, Some(empty_array), 0, // RuntimeLang unique_type_id.as_ptr()) }; metadata_stub } /// Creates debug information for the given global variable. /// /// Adds the created metadata nodes directly to the crate's IR. 
pub fn create_global_var_metadata(
    cx: &CodegenCx<'ll, '_>,
    def_id: DefId,
    global: &'ll Value,
) {
    // Nothing to do when debuginfo generation is disabled for this crate.
    if cx.dbg_cx.is_none() {
        return;
    }

    let tcx = cx.tcx;
    let attrs = tcx.codegen_fn_attrs(def_id);

    // Respect `#[no_debug]` on the item.
    if attrs.flags.contains(CodegenFnAttrFlags::NO_DEBUG) {
        return;
    }

    let no_mangle = attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE);
    // We may want to remove the namespace scope if we're in an extern block, see:
    // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952
    let var_scope = get_namespace_for_item(cx, def_id);
    let span = tcx.def_span(def_id);

    let (file_metadata, line_number) = if !span.is_dummy() {
        let loc = span_start(cx, span);
        (file_metadata(cx, &loc.file.name, LOCAL_CRATE), loc.line as c_uint)
    } else {
        (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
    };

    let is_local_to_unit = is_node_local_to_unit(cx, def_id);
    let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx);
    let type_metadata = type_metadata(cx, variable_type, span);
    let var_name = SmallCStr::new(&tcx.item_name(def_id).as_str());
    let linkage_name = if no_mangle {
        None
    } else {
        let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id));
        Some(SmallCStr::new(&linkage_name.as_str()))
    };

    let global_align = cx.align_of(variable_type);

    unsafe {
        llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx),
                                                    Some(var_scope),
                                                    var_name.as_ptr(),
                                                    // If null, linkage_name field is omitted,
                                                    // which is what we want for no_mangle statics
                                                    linkage_name.as_ref()
                                                        .map_or(ptr::null(), |name| name.as_ptr()),
                                                    file_metadata,
                                                    line_number,
                                                    type_metadata,
                                                    is_local_to_unit,
                                                    global,
                                                    None,
                                                    global_align.bytes() as u32,
        );
    }
}

/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_vtable_metadata(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, vtable: &'ll Value) {
    // Nothing to do when debuginfo generation is disabled for this crate.
    if cx.dbg_cx.is_none() {
        return;
    }

    let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP);

    unsafe {
        // LLVMRustDIBuilderCreateStructType() wants an empty array. A null
        // pointer will lead to hard to trace and debug LLVM assertions
        // later on in llvm/lib/IR/Value.cpp.
        let empty_array = create_DIArray(DIB(cx), &[]);

        let name = const_cstr!("vtable");

        // Create a new one each time. We don't want metadata caching
        // here, because each vtable will refer to a unique containing
        // type.
        let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
            DIB(cx),
            NO_SCOPE_METADATA,
            name.as_ptr(),
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            Size::ZERO.bits(),
            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
            DIFlags::FlagArtificial,
            None,
            empty_array,
            0,
            Some(type_metadata),
            name.as_ptr()
        );

        llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx),
                                                    NO_SCOPE_METADATA,
                                                    name.as_ptr(),
                                                    // No linkage name for the vtable variable.
                                                    ptr::null(),
                                                    unknown_file_metadata(cx),
                                                    UNKNOWN_LINE_NUMBER,
                                                    vtable_type,
                                                    true,
                                                    vtable,
                                                    None,
                                                    0);
    }
}

// Creates an "extension" of an existing DIScope into another file.
pub fn extend_scope_to_file(
    cx: &CodegenCx<'ll, '_>,
    scope_metadata: &'ll DIScope,
    file: &syntax_pos::SourceFile,
    defining_crate: CrateNum,
) -> &'ll DILexicalBlock {
    let file_metadata = file_metadata(cx, &file.name, defining_crate);
    unsafe {
        llvm::LLVMRustDIBuilderCreateLexicalBlockFile(
            DIB(cx),
            scope_metadata,
            file_metadata)
    }
}
38.810159
99
0.53078
ccdfd35f9621a1baa7148885bf174b6e7dd05226
1,606
/// Yew component that renders the Material Design "where to vote" icon as an
/// inline SVG. All visual attributes (CSS class, size, fill, stroke colors and
/// stroke geometry) come from the shared `crate::Props`, with the defaults
/// applied inline in `view`.
pub struct IconWhereToVote {
    props: crate::Props,
}

impl yew::Component for IconWhereToVote {
    type Properties = crate::Props;
    type Message = ();

    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self {
        Self { props }
    }

    // `Message` is `()`; if an update ever arrives, just re-render.
    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender {
        true
    }

    // Props are not stored on change, so never trigger a re-render here.
    fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender {
        false
    }

    fn view(&self) -> yew::prelude::Html {
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=self.props.size.unwrap_or(24).to_string()
                height=self.props.size.unwrap_or(24).to_string()
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
            <svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M12 1C7.59 1 4 4.59 4 9c0 5.57 6.96 13.34 7.26 13.67l.74.82.74-.82C13.04 22.34 20 14.57 20 9c0-4.41-3.59-8-8-8zm0 19.47C9.82 17.86 6 12.54 6 9c0-3.31 2.69-6 6-6s6 2.69 6 6c0 3.83-4.25 9.36-6 11.47zm-1.53-9.3L8.71 9.4l-1.42 1.42L10.47 14l6.01-6.01-1.41-1.42z"/></svg>
            </svg>
        }
    }
}
34.913043
409
0.579701
0eaf83d719cdfc90cd97c46f7bbe6eef99746a30
3,721
// Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

#![forbid(unsafe_code)]
#![allow(deprecated)]
#![cfg_attr(docsrs, feature(doc_cfg, extended_key_value_attributes))]
#![cfg_attr(docsrs, cfg_attr(docsrs, doc = include_str!("../README.md")))]
#![cfg_attr(not(docsrs), doc = "")]
#![allow(clippy::upper_case_acronyms)]
#![warn(
  rust_2018_idioms,
  unreachable_pub,
  missing_docs,
  rustdoc::missing_crate_level_docs,
  rustdoc::broken_intra_doc_links,
  rustdoc::private_intra_doc_links,
  rustdoc::private_doc_tests,
  clippy::missing_safety_doc,
  clippy::missing_errors_doc
)]

// This crate is a facade: each module below re-exports one of the `identity_*`
// sub-crates under a stable, user-facing path.

pub mod core {
  //! Core Traits and Types

  pub use identity_core::common::*;
  pub use identity_core::convert::*;
  pub use identity_core::error::*;
  pub use identity_core::utils::*;

  #[deprecated(since = "0.5.0", note = "diff chain features are slated for removal")]
  #[doc(inline)]
  pub use identity_core::diff;

  #[doc(inline)]
  pub use identity_core::json;
}

pub mod crypto {
  //! Cryptographic Utilities

  pub use identity_core::crypto::*;
}

pub mod credential {
  //! Verifiable Credentials
  //!
  //! [Specification](https://www.w3.org/TR/vc-data-model/)

  pub use identity_credential::credential::*;
  pub use identity_credential::error::*;
  pub use identity_credential::presentation::*;
}

pub mod did {
  //! Decentralized Identifiers
  //!
  //! [Specification](https://www.w3.org/TR/did-core/)

  pub use identity_did::document::*;
  pub use identity_did::error::*;
  pub use identity_did::service::*;
  pub use identity_did::utils::*;
  pub use identity_did::verification::*;

  pub use identity_did::did::*;

  pub use identity_did::resolution;
  pub use identity_did::verifiable;
}

pub mod iota {
  //! IOTA Tangle DID Method

  pub use identity_iota::chain::*;
  pub use identity_iota::credential::*;
  pub use identity_iota::document::*;
  pub use identity_iota::error::*;
  pub use identity_iota::tangle::*;
}

pub mod iota_core {
  //! IOTA Core Traits and Types definitions

  pub use identity_iota_core::did::*;
  pub use identity_iota_core::diff::*;
  pub use identity_iota_core::document::*;
  pub use identity_iota_core::error::*;
  pub use identity_iota_core::tangle::*;

  #[doc(inline)]
  pub use identity_iota_core::try_construct_did;
}

#[cfg(feature = "account")]
#[cfg_attr(docsrs, doc(cfg(feature = "account")))]
pub mod account {
  //! Secure storage for Decentralized Identifiers

  pub use identity_account::account::*;
  pub use identity_account::error::*;
  pub use identity_account::types::*;
  pub use identity_account::updates::*;
}

#[cfg(feature = "account")]
#[cfg_attr(docsrs, doc(cfg(feature = "account")))]
pub mod account_storage {
  //! Storage Trait and Types definitions

  pub use identity_account_storage::crypto::*;
  pub use identity_account_storage::error::*;
  pub use identity_account_storage::identity::*;
  pub use identity_account_storage::storage::*;
  pub use identity_account_storage::types::*;
  pub use identity_account_storage::utils::*;
}

// The `comm` module is intentionally disabled (kept for reference only):
// #[cfg(feature = "comm")]
// #[cfg_attr(docsrs, doc(cfg(feature = "comm")))]
// pub mod comm {
//   //! DID Communications Message Specification
//   //!
//   //! [Specification](https://github.com/iotaledger/identity.rs/tree/dev/docs/DID%20Communications%20Research%20and%20Specification)
//   pub use identity_comm::envelope::*;
//   pub use identity_comm::error::*;
//   pub use identity_comm::message::*;
// }

pub mod prelude {
  //! Prelude of commonly used types

  pub use identity_core::crypto::KeyPair;
  pub use identity_core::crypto::KeyType;
  pub use identity_iota::tangle::Client;
  pub use identity_iota::Result;
  pub use identity_iota_core::document::IotaDocument;
}
26.769784
135
0.706799
e24ae0237e8768160f4e98fe910c7f3723395ed1
26,577
#![allow(
	unused_parens,
	clippy::excessive_precision,
	clippy::missing_safety_doc,
	clippy::not_unsafe_ptr_arg_deref,
	clippy::should_implement_trait,
	clippy::too_many_arguments,
	clippy::unused_unit,
)]
//! # Super Resolution
//!
//! The Super Resolution module contains a set of functions and classes that can be used to solve the
//! problem of resolution enhancement. There are a few methods implemented, most of them are described in
//! the papers [Farsiu03](https://docs.opencv.org/4.5.2/d0/de3/citelist.html#CITEREF_Farsiu03) and [Mitzel09](https://docs.opencv.org/4.5.2/d0/de3/citelist.html#CITEREF_Mitzel09) .
// NOTE: this file is generated binding code (opencv-rust); each function wraps
// an `extern` symbol from the `sys` layer and converts the raw result into a
// smart pointer via `opencv_from_extern`.
use crate::{mod_prelude::*, core, sys, types};
pub mod prelude {
	pub use { super::Superres_DenseOpticalFlowExt, super::Superres_FarnebackOpticalFlow, super::Superres_DualTVL1OpticalFlow, super::Superres_BroxOpticalFlow, super::Superres_PyrLKOpticalFlow, super::Superres_FrameSource, super::Superres_SuperResolution };
}

/// Creates a frame source reading from the camera with the given id.
///
/// ## C++ default parameters
/// * device_id: 0
pub fn create_frame_source_camera(device_id: i32) -> Result<core::Ptr::<dyn crate::superres::Superres_FrameSource>> {
	unsafe { sys::cv_superres_createFrameSource_Camera_int(device_id) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FrameSource>::opencv_from_extern(r) } )
}

/// Creates an empty frame source.
pub fn create_frame_source_empty() -> Result<core::Ptr::<dyn crate::superres::Superres_FrameSource>> {
	unsafe { sys::cv_superres_createFrameSource_Empty() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FrameSource>::opencv_from_extern(r) } )
}

/// Creates a frame source reading from a video file (CUDA variant).
pub fn create_frame_source_video_cuda(file_name: &str) -> Result<core::Ptr::<dyn crate::superres::Superres_FrameSource>> {
	extern_container_arg!(file_name);
	unsafe { sys::cv_superres_createFrameSource_Video_CUDA_const_StringR(file_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FrameSource>::opencv_from_extern(r) } )
}

/// Creates a frame source reading from a video file.
pub fn create_frame_source_video(file_name: &str) -> Result<core::Ptr::<dyn crate::superres::Superres_FrameSource>> {
	extern_container_arg!(file_name);
	unsafe { sys::cv_superres_createFrameSource_Video_const_StringR(file_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FrameSource>::opencv_from_extern(r) } )
}

/// Creates a Brox optical-flow estimator (CUDA).
pub fn create_opt_flow_brox_cuda() -> Result<core::Ptr::<dyn crate::superres::Superres_BroxOpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_Brox_CUDA() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_BroxOpticalFlow>::opencv_from_extern(r) } )
}

/// Creates a Dual TV-L1 optical-flow estimator.
pub fn create_opt_flow_dual_tvl1() -> Result<core::Ptr::<dyn crate::superres::Superres_DualTVL1OpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_DualTVL1() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_DualTVL1OpticalFlow>::opencv_from_extern(r) } )
}

/// Creates a Dual TV-L1 optical-flow estimator (CUDA).
pub fn create_opt_flow_dual_tvl1_cuda() -> Result<core::Ptr::<dyn crate::superres::Superres_DualTVL1OpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_DualTVL1_CUDA() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_DualTVL1OpticalFlow>::opencv_from_extern(r) } )
}

/// Creates a Farneback optical-flow estimator.
pub fn create_opt_flow_farneback() -> Result<core::Ptr::<dyn crate::superres::Superres_FarnebackOpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_Farneback() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FarnebackOpticalFlow>::opencv_from_extern(r) } )
}

/// Creates a Farneback optical-flow estimator (CUDA).
pub fn create_opt_flow_farneback_cuda() -> Result<core::Ptr::<dyn crate::superres::Superres_FarnebackOpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_Farneback_CUDA() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_FarnebackOpticalFlow>::opencv_from_extern(r) } )
}

/// Creates a pyramidal Lucas-Kanade optical-flow estimator (CUDA).
pub fn create_opt_flow_pyr_lk_cuda() -> Result<core::Ptr::<dyn crate::superres::Superres_PyrLKOpticalFlow>> {
	unsafe { sys::cv_superres_createOptFlow_PyrLK_CUDA() }.into_result().map(|r| unsafe { core::Ptr::<dyn
crate::superres::Superres_PyrLKOpticalFlow>::opencv_from_extern(r) } ) } /// Create Bilateral TV-L1 Super Resolution. /// /// This class implements Super Resolution algorithm described in the papers [Farsiu03](https://docs.opencv.org/4.5.2/d0/de3/citelist.html#CITEREF_Farsiu03) and /// [Mitzel09](https://docs.opencv.org/4.5.2/d0/de3/citelist.html#CITEREF_Mitzel09) . /// /// Here are important members of the class that control the algorithm, which you can set after /// constructing the class instance: /// /// * **int scale** Scale factor. /// * **int iterations** Iteration count. /// * **double tau** Asymptotic value of steepest descent method. /// * **double lambda** Weight parameter to balance data term and smoothness term. /// * **double alpha** Parameter of spacial distribution in Bilateral-TV. /// * **int btvKernelSize** Kernel size of Bilateral-TV filter. /// * **int blurKernelSize** Gaussian blur kernel size. /// * **double blurSigma** Gaussian blur sigma. /// * **int temporalAreaRadius** Radius of the temporal search area. /// * **Ptr\<DenseOpticalFlowExt\> opticalFlow** Dense optical flow algorithm. 
pub fn create_super_resolution_btvl1() -> Result<core::Ptr::<dyn crate::superres::Superres_SuperResolution>> { unsafe { sys::cv_superres_createSuperResolution_BTVL1() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_SuperResolution>::opencv_from_extern(r) } ) } pub fn create_super_resolution_btvl1_cuda() -> Result<core::Ptr::<dyn crate::superres::Superres_SuperResolution>> { unsafe { sys::cv_superres_createSuperResolution_BTVL1_CUDA() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_SuperResolution>::opencv_from_extern(r) } ) } pub trait Superres_BroxOpticalFlow: crate::superres::Superres_DenseOpticalFlowExt { fn as_raw_Superres_BroxOpticalFlow(&self) -> *const c_void; fn as_raw_mut_Superres_BroxOpticalFlow(&mut self) -> *mut c_void; /// Flow smoothness /// ## See also /// setAlpha fn get_alpha(&self) -> Result<f64> { unsafe { sys::cv_superres_BroxOpticalFlow_getAlpha_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Flow smoothness /// ## See also /// setAlpha getAlpha fn set_alpha(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setAlpha_double(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } /// Gradient constancy importance /// ## See also /// setGamma fn get_gamma(&self) -> Result<f64> { unsafe { sys::cv_superres_BroxOpticalFlow_getGamma_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Gradient constancy importance /// ## See also /// setGamma getGamma fn set_gamma(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setGamma_double(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } /// Pyramid scale factor /// ## See also /// setScaleFactor fn get_scale_factor(&self) -> Result<f64> { unsafe { sys::cv_superres_BroxOpticalFlow_getScaleFactor_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Pyramid scale factor /// ## See also /// setScaleFactor getScaleFactor fn 
set_scale_factor(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setScaleFactor_double(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } /// Number of lagged non-linearity iterations (inner loop) /// ## See also /// setInnerIterations fn get_inner_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_BroxOpticalFlow_getInnerIterations_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Number of lagged non-linearity iterations (inner loop) /// ## See also /// setInnerIterations getInnerIterations fn set_inner_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setInnerIterations_int(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } /// Number of warping iterations (number of pyramid levels) /// ## See also /// setOuterIterations fn get_outer_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_BroxOpticalFlow_getOuterIterations_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Number of warping iterations (number of pyramid levels) /// ## See also /// setOuterIterations getOuterIterations fn set_outer_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setOuterIterations_int(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } /// Number of linear system solver iterations /// ## See also /// setSolverIterations fn get_solver_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_BroxOpticalFlow_getSolverIterations_const(self.as_raw_Superres_BroxOpticalFlow()) }.into_result() } /// Number of linear system solver iterations /// ## See also /// setSolverIterations getSolverIterations fn set_solver_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_BroxOpticalFlow_setSolverIterations_int(self.as_raw_mut_Superres_BroxOpticalFlow(), val) }.into_result() } } pub trait Superres_DenseOpticalFlowExt: core::AlgorithmTrait { fn 
as_raw_Superres_DenseOpticalFlowExt(&self) -> *const c_void; fn as_raw_mut_Superres_DenseOpticalFlowExt(&mut self) -> *mut c_void; /// ## C++ default parameters /// * flow2: noArray() fn calc(&mut self, frame0: &dyn core::ToInputArray, frame1: &dyn core::ToInputArray, flow1: &mut dyn core::ToOutputArray, flow2: &mut dyn core::ToOutputArray) -> Result<()> { input_array_arg!(frame0); input_array_arg!(frame1); output_array_arg!(flow1); output_array_arg!(flow2); unsafe { sys::cv_superres_DenseOpticalFlowExt_calc_const__InputArrayR_const__InputArrayR_const__OutputArrayR_const__OutputArrayR(self.as_raw_mut_Superres_DenseOpticalFlowExt(), frame0.as_raw__InputArray(), frame1.as_raw__InputArray(), flow1.as_raw__OutputArray(), flow2.as_raw__OutputArray()) }.into_result() } fn collect_garbage(&mut self) -> Result<()> { unsafe { sys::cv_superres_DenseOpticalFlowExt_collectGarbage(self.as_raw_mut_Superres_DenseOpticalFlowExt()) }.into_result() } } pub trait Superres_DualTVL1OpticalFlow: crate::superres::Superres_DenseOpticalFlowExt { fn as_raw_Superres_DualTVL1OpticalFlow(&self) -> *const c_void; fn as_raw_mut_Superres_DualTVL1OpticalFlow(&mut self) -> *mut c_void; /// ## See also /// setTau fn get_tau(&self) -> Result<f64> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getTau_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setTau getTau fn set_tau(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setTau_double(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setLambda fn get_lambda(&self) -> Result<f64> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getLambda_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setLambda getLambda fn set_lambda(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setLambda_double(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also 
/// setTheta fn get_theta(&self) -> Result<f64> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getTheta_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setTheta getTheta fn set_theta(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setTheta_double(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setScalesNumber fn get_scales_number(&self) -> Result<i32> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getScalesNumber_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setScalesNumber getScalesNumber fn set_scales_number(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setScalesNumber_int(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setWarpingsNumber fn get_warpings_number(&self) -> Result<i32> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getWarpingsNumber_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setWarpingsNumber getWarpingsNumber fn set_warpings_number(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setWarpingsNumber_int(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setEpsilon fn get_epsilon(&self) -> Result<f64> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getEpsilon_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setEpsilon getEpsilon fn set_epsilon(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setEpsilon_double(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setIterations fn get_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getIterations_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setIterations getIterations 
fn set_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setIterations_int(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } /// ## See also /// setUseInitialFlow fn get_use_initial_flow(&self) -> Result<bool> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_getUseInitialFlow_const(self.as_raw_Superres_DualTVL1OpticalFlow()) }.into_result() } /// ## See also /// setUseInitialFlow getUseInitialFlow fn set_use_initial_flow(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_superres_DualTVL1OpticalFlow_setUseInitialFlow_bool(self.as_raw_mut_Superres_DualTVL1OpticalFlow(), val) }.into_result() } } pub trait Superres_FarnebackOpticalFlow: crate::superres::Superres_DenseOpticalFlowExt { fn as_raw_Superres_FarnebackOpticalFlow(&self) -> *const c_void; fn as_raw_mut_Superres_FarnebackOpticalFlow(&mut self) -> *mut c_void; /// ## See also /// setPyrScale fn get_pyr_scale(&self) -> Result<f64> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getPyrScale_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setPyrScale getPyrScale fn set_pyr_scale(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setPyrScale_double(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setLevelsNumber fn get_levels_number(&self) -> Result<i32> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getLevelsNumber_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setLevelsNumber getLevelsNumber fn set_levels_number(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setLevelsNumber_int(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setWindowSize fn get_window_size(&self) -> Result<i32> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getWindowSize_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } 
/// ## See also /// setWindowSize getWindowSize fn set_window_size(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setWindowSize_int(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setIterations fn get_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getIterations_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setIterations getIterations fn set_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setIterations_int(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setPolyN fn get_poly_n(&self) -> Result<i32> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getPolyN_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setPolyN getPolyN fn set_poly_n(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setPolyN_int(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setPolySigma fn get_poly_sigma(&self) -> Result<f64> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getPolySigma_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setPolySigma getPolySigma fn set_poly_sigma(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setPolySigma_double(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } /// ## See also /// setFlags fn get_flags(&self) -> Result<i32> { unsafe { sys::cv_superres_FarnebackOpticalFlow_getFlags_const(self.as_raw_Superres_FarnebackOpticalFlow()) }.into_result() } /// ## See also /// setFlags getFlags fn set_flags(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_FarnebackOpticalFlow_setFlags_int(self.as_raw_mut_Superres_FarnebackOpticalFlow(), val) }.into_result() } } pub trait Superres_FrameSource { fn 
as_raw_Superres_FrameSource(&self) -> *const c_void; fn as_raw_mut_Superres_FrameSource(&mut self) -> *mut c_void; fn next_frame(&mut self, frame: &mut dyn core::ToOutputArray) -> Result<()> { output_array_arg!(frame); unsafe { sys::cv_superres_FrameSource_nextFrame_const__OutputArrayR(self.as_raw_mut_Superres_FrameSource(), frame.as_raw__OutputArray()) }.into_result() } fn reset(&mut self) -> Result<()> { unsafe { sys::cv_superres_FrameSource_reset(self.as_raw_mut_Superres_FrameSource()) }.into_result() } } pub trait Superres_PyrLKOpticalFlow: crate::superres::Superres_DenseOpticalFlowExt { fn as_raw_Superres_PyrLKOpticalFlow(&self) -> *const c_void; fn as_raw_mut_Superres_PyrLKOpticalFlow(&mut self) -> *mut c_void; /// ## See also /// setWindowSize fn get_window_size(&self) -> Result<i32> { unsafe { sys::cv_superres_PyrLKOpticalFlow_getWindowSize_const(self.as_raw_Superres_PyrLKOpticalFlow()) }.into_result() } /// ## See also /// setWindowSize getWindowSize fn set_window_size(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_PyrLKOpticalFlow_setWindowSize_int(self.as_raw_mut_Superres_PyrLKOpticalFlow(), val) }.into_result() } /// ## See also /// setMaxLevel fn get_max_level(&self) -> Result<i32> { unsafe { sys::cv_superres_PyrLKOpticalFlow_getMaxLevel_const(self.as_raw_Superres_PyrLKOpticalFlow()) }.into_result() } /// ## See also /// setMaxLevel getMaxLevel fn set_max_level(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_PyrLKOpticalFlow_setMaxLevel_int(self.as_raw_mut_Superres_PyrLKOpticalFlow(), val) }.into_result() } /// ## See also /// setIterations fn get_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_PyrLKOpticalFlow_getIterations_const(self.as_raw_Superres_PyrLKOpticalFlow()) }.into_result() } /// ## See also /// setIterations getIterations fn set_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_PyrLKOpticalFlow_setIterations_int(self.as_raw_mut_Superres_PyrLKOpticalFlow(), val) 
}.into_result() } } /// Base class for Super Resolution algorithms. /// /// The class is only used to define the common interface for the whole family of Super Resolution /// algorithms. pub trait Superres_SuperResolution: core::AlgorithmTrait + crate::superres::Superres_FrameSource { fn as_raw_Superres_SuperResolution(&self) -> *const c_void; fn as_raw_mut_Superres_SuperResolution(&mut self) -> *mut c_void; /// Set input frame source for Super Resolution algorithm. /// /// ## Parameters /// * frameSource: Input frame source fn set_input(&mut self, frame_source: &core::Ptr::<dyn crate::superres::Superres_FrameSource>) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setInput_const_Ptr_FrameSource_R(self.as_raw_mut_Superres_SuperResolution(), frame_source.as_raw_PtrOfSuperres_FrameSource()) }.into_result() } /// Process next frame from input and return output result. /// /// ## Parameters /// * frame: Output result fn next_frame(&mut self, frame: &mut dyn core::ToOutputArray) -> Result<()> { output_array_arg!(frame); unsafe { sys::cv_superres_SuperResolution_nextFrame_const__OutputArrayR(self.as_raw_mut_Superres_SuperResolution(), frame.as_raw__OutputArray()) }.into_result() } fn reset(&mut self) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_reset(self.as_raw_mut_Superres_SuperResolution()) }.into_result() } /// Clear all inner buffers. 
fn collect_garbage(&mut self) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_collectGarbage(self.as_raw_mut_Superres_SuperResolution()) }.into_result() } /// Scale factor /// ## See also /// setScale fn get_scale(&self) -> Result<i32> { unsafe { sys::cv_superres_SuperResolution_getScale_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Scale factor /// ## See also /// setScale getScale fn set_scale(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setScale_int(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Iterations count /// ## See also /// setIterations fn get_iterations(&self) -> Result<i32> { unsafe { sys::cv_superres_SuperResolution_getIterations_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Iterations count /// ## See also /// setIterations getIterations fn set_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setIterations_int(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Asymptotic value of steepest descent method /// ## See also /// setTau fn get_tau(&self) -> Result<f64> { unsafe { sys::cv_superres_SuperResolution_getTau_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Asymptotic value of steepest descent method /// ## See also /// setTau getTau fn set_tau(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setTau_double(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Weight parameter to balance data term and smoothness term /// ## See also /// setLambda fn get_lambda(&self) -> Result<f64> { unsafe { sys::cv_superres_SuperResolution_getLambda_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Weight parameter to balance data term and smoothness term /// ## See also /// setLambda getLambda fn set_lambda(&mut self, val: f64) -> Result<()> { unsafe { 
sys::cv_superres_SuperResolution_setLambda_double(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Parameter of spacial distribution in Bilateral-TV /// ## See also /// setAlpha fn get_alpha(&self) -> Result<f64> { unsafe { sys::cv_superres_SuperResolution_getAlpha_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Parameter of spacial distribution in Bilateral-TV /// ## See also /// setAlpha getAlpha fn set_alpha(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setAlpha_double(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Kernel size of Bilateral-TV filter /// ## See also /// setKernelSize fn get_kernel_size(&self) -> Result<i32> { unsafe { sys::cv_superres_SuperResolution_getKernelSize_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Kernel size of Bilateral-TV filter /// ## See also /// setKernelSize getKernelSize fn set_kernel_size(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setKernelSize_int(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Gaussian blur kernel size /// ## See also /// setBlurKernelSize fn get_blur_kernel_size(&self) -> Result<i32> { unsafe { sys::cv_superres_SuperResolution_getBlurKernelSize_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Gaussian blur kernel size /// ## See also /// setBlurKernelSize getBlurKernelSize fn set_blur_kernel_size(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setBlurKernelSize_int(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Gaussian blur sigma /// ## See also /// setBlurSigma fn get_blur_sigma(&self) -> Result<f64> { unsafe { sys::cv_superres_SuperResolution_getBlurSigma_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Gaussian blur sigma /// ## See also /// setBlurSigma getBlurSigma fn set_blur_sigma(&mut self, val: f64) -> Result<()> { unsafe { 
sys::cv_superres_SuperResolution_setBlurSigma_double(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Radius of the temporal search area /// ## See also /// setTemporalAreaRadius fn get_temporal_area_radius(&self) -> Result<i32> { unsafe { sys::cv_superres_SuperResolution_getTemporalAreaRadius_const(self.as_raw_Superres_SuperResolution()) }.into_result() } /// Radius of the temporal search area /// ## See also /// setTemporalAreaRadius getTemporalAreaRadius fn set_temporal_area_radius(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setTemporalAreaRadius_int(self.as_raw_mut_Superres_SuperResolution(), val) }.into_result() } /// Dense optical flow algorithm /// ## See also /// setOpticalFlow fn get_optical_flow(&self) -> Result<core::Ptr::<dyn crate::superres::Superres_DenseOpticalFlowExt>> { unsafe { sys::cv_superres_SuperResolution_getOpticalFlow_const(self.as_raw_Superres_SuperResolution()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::superres::Superres_DenseOpticalFlowExt>::opencv_from_extern(r) } ) } /// Dense optical flow algorithm /// ## See also /// setOpticalFlow getOpticalFlow fn set_optical_flow(&mut self, val: &core::Ptr::<dyn crate::superres::Superres_DenseOpticalFlowExt>) -> Result<()> { unsafe { sys::cv_superres_SuperResolution_setOpticalFlow_const_Ptr_DenseOpticalFlowExt_R(self.as_raw_mut_Superres_SuperResolution(), val.as_raw_PtrOfSuperres_DenseOpticalFlowExt()) }.into_result() } }
42.591346
310
0.75095
036e8d4869392ca38aaa01b612a5f7a7d9a16ce9
868
// plotters-conrod // // Conrod backend for Plotters // Copyright: 2020, Valerian Saliou <[email protected]> // License: MIT /*! The Plotters Conrod backend. This is an implementation of a Conrod backend for Plotters. This is more efficient than using the default Bitmap backend when plotting in Conrod, as it has been observed that Conrod was quite inefficient at re-rendering images at high FPS (eg. for real-time plotting). This backend has been optimized as for speed, and as to render plots that look very similar to the default Bitmap backend, if not indistinguishable. See the documentation for [ConrodBackend](struct.ConrodBackend.html) for more details. */ mod backend; mod error; mod graph; mod triangulate; mod utils; pub use backend::ConrodBackend; pub use error::ConrodBackendError; pub use graph::ConrodBackendReusableGraph;
33.384615
271
0.77765
0afb28bb4dccd097bb7a5f146429e6e61e302264
343
#![warn(rust_2018_idioms)] use bytes::buf::{Buf, BufExt}; #[test] fn long_take() { // Tests that get a take with a size greater than the buffer length will not // overrun the buffer. Regression test for #138. let buf = b"hello world".take(100); assert_eq!(11, buf.remaining()); assert_eq!(b"hello world", buf.bytes()); }
26.384615
80
0.655977
1802c302466855180a922d42f1a5c5afd317fce5
72,676
// This file is part of Substrate. // Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Traits for FRAME. //! //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; use sp_core::u32_trait::Value as U32; use sp_runtime::{ RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, traits::{ MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, BadOrigin, AtLeast32BitUnsigned, UniqueSaturatedFrom, UniqueSaturatedInto, SaturatedConversion, }, }; use crate::dispatch::Parameter; use crate::storage::StorageMap; use crate::weights::Weight; use bitflags::bitflags; use impl_trait_for_tuples::impl_for_tuples; /// Re-expected for the macro. #[doc(hidden)] pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; /// Simple trait for providing a filter over a reference to some type. pub trait Filter<T> { /// Determine if a given value should be allowed through the filter (returns `true`) or not. fn filter(_: &T) -> bool; } impl<T> Filter<T> for () { fn filter(_: &T) -> bool { true } } /// Trait to add a constraint onto the filter. pub trait FilterStack<T>: Filter<T> { /// The type used to archive the stack. type Stack; /// Add a new `constraint` onto the filter. 
fn push(constraint: impl Fn(&T) -> bool + 'static); /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. fn pop(); /// Clear the filter, returning a value that may be used later to `restore` it. fn take() -> Self::Stack; /// Restore the filter from a previous `take` operation. fn restore(taken: Self::Stack); } /// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. pub struct FilterStackGuard<F: FilterStack<T>, T>(PhantomData<(F, T)>); /// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when /// dropped. pub struct ClearFilterGuard<F: FilterStack<T>, T>(Option<F::Stack>, PhantomData<T>); impl<F: FilterStack<T>, T> FilterStackGuard<F, T> { /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when /// this instance is dropped. pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { F::push(constraint); Self(PhantomData) } } impl<F: FilterStack<T>, T> Drop for FilterStackGuard<F, T> { fn drop(&mut self) { F::pop(); } } impl<F: FilterStack<T>, T> ClearFilterGuard<F, T> { /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when /// this instance is dropped. pub fn new() -> Self { Self(Some(F::take()), PhantomData) } } impl<F: FilterStack<T>, T> Drop for ClearFilterGuard<F, T> { fn drop(&mut self) { if let Some(taken) = self.0.take() { F::restore(taken); } } } /// Simple trait for providing a filter over a reference to some type, given an instance of itself. pub trait InstanceFilter<T>: Sized + Send + Sync { /// Determine if a given value should be allowed through the filter (returns `true`) or not. fn filter(&self, _: &T) -> bool; /// Determines whether `self` matches at least everything that `_o` does. 
fn is_superset(&self, _o: &Self) -> bool { false } } impl<T> InstanceFilter<T> for () { fn filter(&self, _: &T) -> bool { true } fn is_superset(&self, _o: &Self) -> bool { true } } #[macro_export] macro_rules! impl_filter_stack { ($target:ty, $base:ty, $call:ty, $module:ident) => { #[cfg(feature = "std")] mod $module { #[allow(unused_imports)] use super::*; use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; thread_local! { static FILTER: RefCell<Vec<Box<dyn Fn(&$call) -> bool + 'static>>> = RefCell::new(Vec::new()); } impl Filter<$call> for $target { fn filter(call: &$call) -> bool { <$base>::filter(call) && FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) } } impl FilterStack<$call> for $target { type Stack = Vec<Box<dyn Fn(&$call) -> bool + 'static>>; fn push(f: impl Fn(&$call) -> bool + 'static) { FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); } fn pop() { FILTER.with(|filter| filter.borrow_mut().pop()); } fn take() -> Self::Stack { FILTER.with(|filter| take(filter.borrow_mut().as_mut())) } fn restore(mut s: Self::Stack) { FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); } } } #[cfg(not(feature = "std"))] mod $module { #[allow(unused_imports)] use super::*; use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; struct ThisFilter(RefCell<Vec<Box<dyn Fn(&$call) -> bool + 'static>>>); // NOTE: Safe only in wasm (guarded above) because there's only one thread. 
unsafe impl Send for ThisFilter {} unsafe impl Sync for ThisFilter {} static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); impl Filter<$call> for $target { fn filter(call: &$call) -> bool { <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) } } impl FilterStack<$call> for $target { type Stack = Vec<Box<dyn Fn(&$call) -> bool + 'static>>; fn push(f: impl Fn(&$call) -> bool + 'static) { FILTER.0.borrow_mut().push(Box::new(f)); } fn pop() { FILTER.0.borrow_mut().pop(); } fn take() -> Self::Stack { take(FILTER.0.borrow_mut().as_mut()) } fn restore(mut s: Self::Stack) { swap(FILTER.0.borrow_mut().as_mut(), &mut s); } } } } } /// Type that provide some integrity tests. /// /// This implemented for modules by `decl_module`. #[impl_for_tuples(30)] pub trait IntegrityTest { /// Run integrity test. /// /// The test is not executed in a externalities provided environment. fn integrity_test() {} } #[cfg(test)] mod test_impl_filter_stack { use super::*; pub struct IsCallable; pub struct BaseFilter; impl Filter<u32> for BaseFilter { fn filter(x: &u32) -> bool { x % 2 == 0 } } impl_filter_stack!( crate::traits::test_impl_filter_stack::IsCallable, crate::traits::test_impl_filter_stack::BaseFilter, u32, is_callable ); #[test] fn impl_filter_stack_should_work() { assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); IsCallable::push(|x| *x < 42); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); IsCallable::push(|x| *x % 3 == 0); assert!(IsCallable::filter(&36)); assert!(!IsCallable::filter(&40)); IsCallable::pop(); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); let saved = IsCallable::take(); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); IsCallable::restore(saved); 
assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); IsCallable::pop(); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); } #[test] fn guards_should_work() { assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); { let _guard_1 = FilterStackGuard::<IsCallable, u32>::new(|x| *x < 42); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); { let _guard_2 = FilterStackGuard::<IsCallable, u32>::new(|x| *x % 3 == 0); assert!(IsCallable::filter(&36)); assert!(!IsCallable::filter(&40)); } assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); { let _guard_2 = ClearFilterGuard::<IsCallable, u32>::new(); assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); } assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(!IsCallable::filter(&42)); } assert!(IsCallable::filter(&36)); assert!(IsCallable::filter(&40)); assert!(IsCallable::filter(&42)); assert!(!IsCallable::filter(&43)); } } /// An abstraction of a value stored within storage, but possibly as part of a larger composite /// item. pub trait StoredMap<K, T> { /// Get the item, or its default if it doesn't yet exist; we make no distinction between the /// two. fn get(k: &K) -> T; /// Get whether the item takes up any storage. If this is `false`, then `get` will certainly /// return the `T::default()`. If `true`, then there is no implication for `get` (i.e. it /// may return any value, including the default). /// /// NOTE: This may still be `true`, even after `remove` is called. 
This is the case where
	/// a single storage entry is shared between multiple `StoredMap` items, and without
	/// additional logic to enforce it, deletion of any one of them doesn't automatically imply
	/// deletion of them all.
	fn is_explicit(k: &K) -> bool;
	/// Mutate the item.
	fn mutate<R>(k: &K, f: impl FnOnce(&mut T) -> R) -> R;
	/// Mutate the item, removing or resetting to default value if it has been mutated to `None`.
	fn mutate_exists<R>(k: &K, f: impl FnOnce(&mut Option<T>) -> R) -> R;
	/// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is
	/// returned. It is removed or reset to default value if it has been mutated to `None`.
	fn try_mutate_exists<R, E>(k: &K, f: impl FnOnce(&mut Option<T>) -> Result<R, E>) -> Result<R, E>;
	/// Set the item to something new.
	fn insert(k: &K, t: T) { Self::mutate(k, |i| *i = t); }
	/// Remove the item or otherwise replace it with its default value; we don't care which.
	fn remove(k: &K);
}

/// A simple, generic one-parameter event notifier/handler.
pub trait Happened<T> {
	/// The thing happened.
	fn happened(t: &T);
}

impl<T> Happened<T> for () {
	fn happened(_: &T) {}
}

/// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this
/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this
/// would break the ability to have custom impls of `StoredValue`. The other workaround is to
/// implement it directly in the macro.
///
/// This form has the advantage that two additional types are provided, `Created` and `Removed`,
/// which are both generic events that can be tied to handlers to do something in the case of being
/// about to create an account where one didn't previously exist (at all; not just where it used to
/// be the default value), or where the account is being removed or reset back to the default value
/// where previously it did exist (though may have been in a default state). 
This works well with /// system module's `CallOnCreatedAccount` and `CallKillAccount`. pub struct StorageMapShim< S, Created, Removed, K, T >(sp_std::marker::PhantomData<(S, Created, Removed, K, T)>); impl< S: StorageMap<K, T, Query=T>, Created: Happened<K>, Removed: Happened<K>, K: FullCodec, T: FullCodec, > StoredMap<K, T> for StorageMapShim<S, Created, Removed, K, T> { fn get(k: &K) -> T { S::get(k) } fn is_explicit(k: &K) -> bool { S::contains_key(k) } fn insert(k: &K, t: T) { let existed = S::contains_key(&k); S::insert(k, t); if !existed { Created::happened(k); } } fn remove(k: &K) { let existed = S::contains_key(&k); S::remove(k); if existed { Removed::happened(&k); } } fn mutate<R>(k: &K, f: impl FnOnce(&mut T) -> R) -> R { let existed = S::contains_key(&k); let r = S::mutate(k, f); if !existed { Created::happened(k); } r } fn mutate_exists<R>(k: &K, f: impl FnOnce(&mut Option<T>) -> R) -> R { let (existed, exists, r) = S::mutate_exists(k, |maybe_value| { let existed = maybe_value.is_some(); let r = f(maybe_value); (existed, maybe_value.is_some(), r) }); if !existed && exists { Created::happened(k); } else if existed && !exists { Removed::happened(k); } r } fn try_mutate_exists<R, E>(k: &K, f: impl FnOnce(&mut Option<T>) -> Result<R, E>) -> Result<R, E> { S::try_mutate_exists(k, |maybe_value| { let existed = maybe_value.is_some(); f(maybe_value).map(|v| (existed, maybe_value.is_some(), v)) }).map(|(existed, exists, v)| { if !existed && exists { Created::happened(k); } else if existed && !exists { Removed::happened(k); } v }) } } /// Something that can estimate at which block the next session rotation will happen. This should /// be the same logical unit that dictates `ShouldEndSession` to the session module. No Assumptions /// are made about the scheduling of the sessions. pub trait EstimateNextSessionRotation<BlockNumber> { /// Return the block number at which the next session rotation is estimated to happen. 
///
	/// None should be returned if the estimation fails to come to an answer.
	fn estimate_next_session_rotation(now: BlockNumber) -> Option<BlockNumber>;

	/// Return the weight of calling `estimate_next_session_rotation`.
	fn weight(now: BlockNumber) -> Weight;
}

// Null implementation: no estimate is ever available and the estimation costs nothing.
impl<BlockNumber: Bounded> EstimateNextSessionRotation<BlockNumber> for () {
	fn estimate_next_session_rotation(_: BlockNumber) -> Option<BlockNumber> {
		Default::default()
	}

	fn weight(_: BlockNumber) -> Weight {
		0
	}
}

/// Something that can estimate at which block the next `new_session` will be triggered. This must
/// always be implemented by the session module.
pub trait EstimateNextNewSession<BlockNumber> {
	/// Return the block number at which the next new session is estimated to happen.
	fn estimate_next_new_session(now: BlockNumber) -> Option<BlockNumber>;

	/// Return the weight of calling `estimate_next_new_session`.
	fn weight(now: BlockNumber) -> Weight;
}

// Null implementation: no estimate is ever available and the estimation costs nothing.
impl<BlockNumber: Bounded> EstimateNextNewSession<BlockNumber> for () {
	fn estimate_next_new_session(_: BlockNumber) -> Option<BlockNumber> {
		Default::default()
	}

	fn weight(_: BlockNumber) -> Weight {
		0
	}
}

/// Anything that can have a `::len()` method.
pub trait Len {
	/// Return the length of data type.
	fn len(&self) -> usize;
}

// NOTE: `Clone` is required because the only fully generic way to measure an arbitrary
// iterable is to consume a copy of it.
impl<T: IntoIterator + Clone,> Len for T where <T as IntoIterator>::IntoIter: ExactSizeIterator {
	fn len(&self) -> usize {
		self.clone().into_iter().len()
	}
}

/// A trait for querying a single value from a type.
///
/// It is not required that the value is constant.
pub trait Get<T> {
	/// Return the current value.
	fn get() -> T;
}

impl<T: Default> Get<T> for () {
	fn get() -> T { T::default() }
}

/// A trait for querying whether a type can be said to "contain" a value.
pub trait Contains<T: Ord> {
	/// Return `true` if this "contains" the given value `t`.
	// Default implementation binary-searches the sorted member list.
	fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() }

	/// Get a vector of all members in the set, ordered. 
fn sorted_members() -> Vec<T>; /// Get the number of items in the set. fn count() -> usize { Self::sorted_members().len() } /// Add an item that would satisfy `contains`. It does not make sure any other /// state is correctly maintained or generated. /// /// **Should be used for benchmarking only!!!** #[cfg(feature = "runtime-benchmarks")] fn add(_t: &T) { unimplemented!() } } /// A trait for querying bound for the length of an implementation of `Contains` pub trait ContainsLengthBound { /// Minimum number of elements contained fn min_len() -> usize; /// Maximum number of elements contained fn max_len() -> usize; } /// Determiner to say whether a given account is unused. pub trait IsDeadAccount<AccountId> { /// Is the given account dead? fn is_dead_account(who: &AccountId) -> bool; } impl<AccountId> IsDeadAccount<AccountId> for () { fn is_dead_account(_who: &AccountId) -> bool { true } } /// Handler for when a new account has been created. #[impl_for_tuples(30)] pub trait OnNewAccount<AccountId> { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. #[impl_for_tuples(30)] pub trait OnKilledAccount<AccountId> { /// The account with the given id was reaped. fn on_killed_account(who: &AccountId); } /// A trait for finding the author of a block header based on the `PreRuntime` digests contained /// within it. pub trait FindAuthor<Author> { /// Find the author of a block based on the pre-runtime digests. fn find_author<'a, I>(digests: I) -> Option<Author> where I: 'a + IntoIterator<Item=(ConsensusEngineId, &'a [u8])>; } impl<A> FindAuthor<A> for () { fn find_author<'a, I>(_: I) -> Option<A> where I: 'a + IntoIterator<Item=(ConsensusEngineId, &'a [u8])> { None } } /// A trait for verifying the seal of a header and returning the author. pub trait VerifySeal<Header, Author> { /// Verify a header and return the author, if any. 
fn verify_seal(header: &Header) -> Result<Option<Author>, &'static str>;
}

/// Something which can compute and check proofs of
/// a historical key owner and return full identification data of that
/// key owner.
pub trait KeyOwnerProofSystem<Key> {
	/// The proof of membership itself.
	type Proof: Codec;

	/// The full identification of a key owner and the stash account.
	type IdentificationTuple: Codec;

	/// Prove membership of a key owner in the current block-state.
	///
	/// This should typically only be called off-chain, since it may be
	/// computationally heavy.
	///
	/// Returns `Some` iff the key owner referred to by the given `key` is a
	/// member of the current set.
	fn prove(key: Key) -> Option<Self::Proof>;

	/// Check a proof of membership on-chain. Return `Some` iff the proof is
	/// valid and recent enough to check.
	fn check_proof(key: Key, proof: Self::Proof) -> Option<Self::IdentificationTuple>;
}

impl<Key> KeyOwnerProofSystem<Key> for () {
	// The proof and identification tuples are a bottom type to guarantee that the methods of this
	// implementation can never be called or return anything other than `None`.
	type Proof = crate::Void;
	type IdentificationTuple = crate::Void;

	fn prove(_key: Key) -> Option<Self::Proof> {
		None
	}

	fn check_proof(_key: Key, _proof: Self::Proof) -> Option<Self::IdentificationTuple> {
		None
	}
}

/// Handler for when some currency "account" decreased in balance for
/// some reason.
///
/// The only reason at present for an increase would be for validator rewards, but
/// there may be other reasons in the future or for other chains.
///
/// Reasons for decreases include:
///
/// - Someone got slashed.
/// - Someone paid for a transaction to be included.
pub trait OnUnbalanced<Imbalance: TryDrop> {
	/// Handler for some imbalances. The different imbalances might have different origins or
	/// meanings, dependent on the context. Will default to simply calling on_unbalanced for all
	/// of them. Infallible. 
fn on_unbalanceds<B>(amounts: impl Iterator<Item=Imbalance>) where Imbalance: crate::traits::Imbalance<B> { Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) } /// Handler for some imbalance. Infallible. fn on_unbalanced(amount: Imbalance) { amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) } /// Actually handle a non-zero imbalance. You probably want to implement this rather than /// `on_unbalanced`. fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } } impl<Imbalance: TryDrop> OnUnbalanced<Imbalance> for () {} /// Simple boolean for whether an account needs to be kept in existence. #[derive(Copy, Clone, Eq, PartialEq)] pub enum ExistenceRequirement { /// Operation must not result in the account going out of existence. /// /// Note this implies that if the account never existed in the first place, then the operation /// may legitimately leave the account unchanged and still non-existent. KeepAlive, /// Operation may result in account going out of existence. AllowDeath, } /// A type for which some values make sense to be able to drop without further consideration. pub trait TryDrop: Sized { /// Drop an instance cleanly. Only works if its value represents "no-operation". fn try_drop(self) -> Result<(), Self>; } /// A trait for a not-quite Linear Type that tracks an imbalance. /// /// Functions that alter account balances return an object of this trait to /// express how much account balances have been altered in aggregate. If /// dropped, the currency system will take some default steps to deal with /// the imbalance (`balances` module simply reduces or increases its /// total issuance). Your module should generally handle it in some way, /// good practice is to do so in a configurable manner using an /// `OnUnbalanced` type for each situation in which your module needs to /// handle an imbalance. /// /// Imbalances can either be Positive (funds were added somewhere without /// being subtracted elsewhere - e.g. 
a reward) or Negative (funds deducted /// somewhere without an equal and opposite addition - e.g. a slash or /// system fee payment). /// /// Since they are unsigned, the actual type is always Positive or Negative. /// The trait makes no distinction except to define the `Opposite` type. /// /// New instances of zero value can be created (`zero`) and destroyed /// (`drop_zero`). /// /// Existing instances can be `split` and merged either consuming `self` with /// `merge` or mutating `self` with `subsume`. If the target is an `Option`, /// then `maybe_merge` and `maybe_subsume` might work better. Instances can /// also be `offset` with an `Opposite` that is less than or equal to in value. /// /// You can always retrieve the raw balance value using `peek`. #[must_use] pub trait Imbalance<Balance>: Sized + TryDrop { /// The oppositely imbalanced type. They come in pairs. type Opposite: Imbalance<Balance>; /// The zero imbalance. Can be destroyed with `drop_zero`. fn zero() -> Self; /// Drop an instance cleanly. Only works if its `self.value()` is zero. fn drop_zero(self) -> Result<(), Self>; /// Consume `self` and return two independent instances; the first /// is guaranteed to be at most `amount` and the second will be the remainder. fn split(self, amount: Balance) -> (Self, Self); /// Consume `self` and return two independent instances; the amounts returned will be in /// approximately the same ratio as `first`:`second`. /// /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should /// fit into a `u32`. Overflow will safely saturate in both cases. 
fn ration(self, first: u32, second: u32) -> (Self, Self)
		where Balance: From<u32> + Saturating + Div<Output=Balance>
	{
		let total: u32 = first.saturating_add(second);
		// Guard the division below: with `first == 0 && second == 0` there is no meaningful
		// ratio, so return two zero imbalances instead of panicking with a division by zero.
		// Dropping `self` lets the default imbalance handling deal with any remaining value.
		if total == 0 { return (Self::zero(), Self::zero()) }
		let amount1 = self.peek().saturating_mul(first.into()) / total.into();
		self.split(amount1)
	}

	/// Consume self and add its two components, defined by the first component's balance,
	/// element-wise to two pre-existing Imbalances.
	///
	/// A convenient replacement for `split` and `merge`.
	fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) {
		let (a, b) = self.split(amount);
		(a.merge(others.0), b.merge(others.1))
	}

	/// Consume self and add its two components, defined by the ratio `first`:`second`,
	/// element-wise to two pre-existing Imbalances.
	///
	/// A convenient replacement for `ration` and `merge`.
	fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self)
		where Balance: From<u32> + Saturating + Div<Output=Balance>
	{
		let (a, b) = self.ration(first, second);
		(a.merge(others.0), b.merge(others.1))
	}

	/// Consume self and add its two components, defined by the first component's balance,
	/// element-wise into two pre-existing Imbalance refs.
	///
	/// A convenient replacement for `split` and `subsume`.
	fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) {
		let (a, b) = self.split(amount);
		others.0.subsume(a);
		others.1.subsume(b);
	}

	/// Consume self and add its two components, defined by the ratio `first`:`second`,
	/// element-wise into two pre-existing Imbalance refs.
	///
	/// A convenient replacement for `ration` and `subsume`.
	fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self))
		where Balance: From<u32> + Saturating + Div<Output=Balance>
	{
		let (a, b) = self.ration(first, second);
		others.0.subsume(a);
		others.1.subsume(b);
	}

	/// Consume `self` and an `other` to return a new instance that combines
	/// both.
	fn merge(self, other: Self) -> Self;

	/// Consume self to mutate `other` so that it combines both. 
Just like `subsume`, only with /// reversed arguments. fn merge_into(self, other: &mut Self) { other.subsume(self) } /// Consume `self` and maybe an `other` to return a new instance that combines /// both. fn maybe_merge(self, other: Option<Self>) -> Self { if let Some(o) = other { self.merge(o) } else { self } } /// Consume an `other` to mutate `self` into a new instance that combines /// both. fn subsume(&mut self, other: Self); /// Maybe consume an `other` to mutate `self` into a new instance that combines /// both. fn maybe_subsume(&mut self, other: Option<Self>) { if let Some(o) = other { self.subsume(o) } } /// Consume self and along with an opposite counterpart to return /// a combined result. /// /// Returns `Ok` along with a new instance of `Self` if this instance has a /// greater value than the `other`. Otherwise returns `Err` with an instance of /// the `Opposite`. In both cases the value represents the combination of `self` /// and `other`. fn offset(self, other: Self::Opposite) -> Result<Self, Self::Opposite>; /// The raw value of self. fn peek(&self) -> Balance; } /// Either a positive or a negative imbalance. pub enum SignedImbalance<B, P: Imbalance<B>>{ /// A positive imbalance (funds have been created but none destroyed). Positive(P), /// A negative imbalance (funds have been destroyed but none created). Negative(P::Opposite), } impl< P: Imbalance<B, Opposite=N>, N: Imbalance<B, Opposite=P>, B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, > SignedImbalance<B, P> { pub fn zero() -> Self { SignedImbalance::Positive(P::zero()) } pub fn drop_zero(self) -> Result<(), Self> { match self { SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), } } /// Consume `self` and an `other` to return a new instance that combines /// both. 
pub fn merge(self, other: Self) -> Self {
		match (self, other) {
			(SignedImbalance::Positive(one), SignedImbalance::Positive(other)) =>
				SignedImbalance::Positive(one.merge(other)),
			(SignedImbalance::Negative(one), SignedImbalance::Negative(other)) =>
				SignedImbalance::Negative(one.merge(other)),
			(SignedImbalance::Positive(one), SignedImbalance::Negative(other)) =>
				// Opposite signs: the larger side wins and is reduced by the smaller; ties
				// collapse to a zero negative imbalance.
				if one.peek() > other.peek() {
					SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero))
				} else {
					SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero))
				},
			// (Negative, Positive): swap the operands and let the (Positive, Negative) arm
			// above handle it.
			(one, other) => other.merge(one),
		}
	}
}

/// Split an unbalanced amount two ways between a common divisor.
pub struct SplitTwoWays<
	Balance,
	Imbalance,
	Part1,
	Target1,
	Part2,
	Target2,
>(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>);

impl<
	Balance: From<u32> + Saturating + Div<Output=Balance>,
	I: Imbalance<Balance>,
	Part1: U32,
	Target1: OnUnbalanced<I>,
	Part2: U32,
	Target2: OnUnbalanced<I>,
> OnUnbalanced<I> for SplitTwoWays<Balance, I, Part1, Target1, Part2, Target2>
{
	fn on_nonzero_unbalanced(amount: I) {
		// Give `Part1 / (Part1 + Part2)` of `amount` to `Target1`; the remainder goes to
		// `Target2`.
		let total: u32 = Part1::VALUE + Part2::VALUE;
		let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into();
		let (imb1, imb2) = amount.split(amount1);
		Target1::on_unbalanced(imb1);
		Target2::on_unbalanced(imb2);
	}
}

/// Abstraction over a fungible assets system.
pub trait Currency<AccountId> {
	/// The balance of an account.
	type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug +
		Default;

	/// The opaque token type for an imbalance. This is returned by unbalanced operations
	/// and must be dealt with. It may be dropped but cannot be cloned.
	type PositiveImbalance: Imbalance<Self::Balance, Opposite=Self::NegativeImbalance>;

	/// The opaque token type for an imbalance. This is returned by unbalanced operations
	/// and must be dealt with. It may be dropped but cannot be cloned. 
type NegativeImbalance: Imbalance<Self::Balance, Opposite=Self::PositiveImbalance>; // PUBLIC IMMUTABLES /// The combined balance of `who`. fn total_balance(who: &AccountId) -> Self::Balance; /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no /// balance changes in the meantime and only the reserved balance is not taken into account. fn can_slash(who: &AccountId, value: Self::Balance) -> bool; /// The total amount of issuance in the system. fn total_issuance() -> Self::Balance; /// The minimum balance any single account may have. This is equivalent to the `Balances` module's /// `ExistentialDeposit`. fn minimum_balance() -> Self::Balance; /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will /// typically be used to reduce an account by the same amount with e.g. `settle`. /// /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example /// in the case of underflow. fn burn(amount: Self::Balance) -> Self::PositiveImbalance; /// Increase the total issuance by `amount` and return the according imbalance. The imbalance /// will typically be used to increase an account by the same amount with e.g. /// `resolve_into_existing` or `resolve_creating`. /// /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example /// in the case of overflow. fn issue(amount: Self::Balance) -> Self::NegativeImbalance; /// Produce a pair of imbalances that cancel each other out exactly. /// /// This is just the same as burning and issuing the same amount and has no effect on the /// total issuance. fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { (Self::burn(amount.clone()), Self::issue(amount)) } /// The 'free' balance of a given account. /// /// This is the only balance that matters in terms of most operations on tokens. 
It alone /// is used to determine the balance when in the contract execution environment. When this /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is /// deleted: specifically `FreeBalance`. /// /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. fn free_balance(who: &AccountId) -> Self::Balance; /// Returns `Ok` iff the account is able to make a withdrawal of the given amount /// for the given reason. Basically, it's just a dry-run of `withdraw`. /// /// `Err(...)` with the reason why not otherwise. fn ensure_can_withdraw( who: &AccountId, _amount: Self::Balance, reasons: WithdrawReasons, new_balance: Self::Balance, ) -> DispatchResult; // PUBLIC MUTABLES (DANGEROUS) /// Transfer some liquid free balance to another staker. /// /// This is a very high-level function. It will ensure all appropriate fees are paid /// and no imbalance in the system remains. fn transfer( source: &AccountId, dest: &AccountId, value: Self::Balance, existence_requirement: ExistenceRequirement, ) -> DispatchResult; /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the /// free balance. This function cannot fail. /// /// The resulting imbalance is the first item of the tuple returned. /// /// As much funds up to `value` will be deducted as possible. If this is less than `value`, /// then a non-zero second item will be returned. fn slash( who: &AccountId, value: Self::Balance ) -> (Self::NegativeImbalance, Self::Balance); /// Mints `value` to the free balance of `who`. /// /// If `who` doesn't exist, nothing is done and an Err returned. fn deposit_into_existing( who: &AccountId, value: Self::Balance ) -> result::Result<Self::PositiveImbalance, DispatchError>; /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on /// success. 
fn resolve_into_existing(
		who: &AccountId,
		value: Self::NegativeImbalance,
	) -> result::Result<(), Self::NegativeImbalance> {
		let v = value.peek();
		match Self::deposit_into_existing(who, v) {
			// The returned `PositiveImbalance` is exactly `v`, so it cancels against `value`;
			// `drop` discards the zero-valued offset result, leaving `Ok(())`.
			Ok(opposite) => Ok(drop(value.offset(opposite))),
			_ => Err(value),
		}
	}

	/// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created.
	///
	/// Infallible.
	fn deposit_creating(
		who: &AccountId,
		value: Self::Balance,
	) -> Self::PositiveImbalance;

	/// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on
	/// success.
	fn resolve_creating(
		who: &AccountId,
		value: Self::NegativeImbalance,
	) {
		let v = value.peek();
		// `deposit_creating` is infallible, so the offset always cancels out exactly.
		drop(value.offset(Self::deposit_creating(who, v)));
	}

	/// Removes some free balance from `who` account for `reason` if possible. If `liveness` is
	/// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining.
	///
	/// This checks any locks, vesting, and liquidity requirements. If the removal is not possible,
	/// then it returns `Err`.
	///
	/// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value
	/// is `value`.
	fn withdraw(
		who: &AccountId,
		value: Self::Balance,
		reasons: WithdrawReasons,
		liveness: ExistenceRequirement,
	) -> result::Result<Self::NegativeImbalance, DispatchError>;

	/// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success.
	fn settle(
		who: &AccountId,
		value: Self::PositiveImbalance,
		reasons: WithdrawReasons,
		liveness: ExistenceRequirement,
	) -> result::Result<(), Self::PositiveImbalance> {
		let v = value.peek();
		match Self::withdraw(who, v, reasons, liveness) {
			// As in `resolve_into_existing`: the withdrawn `NegativeImbalance` cancels `value`.
			Ok(opposite) => Ok(drop(value.offset(opposite))),
			_ => Err(value),
		}
	}

	/// Ensure an account's free balance equals some value; this will create the account
	/// if needed.
	///
	/// Returns a signed imbalance and status to indicate if the account was successfully updated or update
	/// has led to killing of the account. 
fn make_free_balance_be(
		who: &AccountId,
		balance: Self::Balance,
	) -> SignedImbalance<Self::Balance, Self::PositiveImbalance>;
}

/// Status of funds.
#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)]
pub enum BalanceStatus {
	/// Funds are free, as corresponding to `free` item in Balances.
	Free,
	/// Funds are reserved, as corresponding to `reserved` item in Balances.
	Reserved,
}

/// A currency where funds can be reserved from the user.
pub trait ReservableCurrency<AccountId>: Currency<AccountId> {
	/// Same result as `reserve(who, value)` (but without the side-effects) assuming there
	/// are no balance changes in the meantime.
	fn can_reserve(who: &AccountId, value: Self::Balance) -> bool;

	/// Deducts up to `value` from reserved balance of `who`. This function cannot fail.
	///
	/// As much funds up to `value` will be deducted as possible. If the reserve balance of `who`
	/// is less than `value`, then a non-zero second item will be returned.
	fn slash_reserved(
		who: &AccountId,
		value: Self::Balance
	) -> (Self::NegativeImbalance, Self::Balance);

	/// The amount of the balance of a given account that is externally reserved; this can still get
	/// slashed, but gets slashed last of all.
	///
	/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
	/// that are still 'owned' by the account holder, but which are suspendable.
	///
	/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
	/// is deleted: specifically, `ReservedBalance`.
	///
	/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
	/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
	fn reserved_balance(who: &AccountId) -> Self::Balance;

	/// Moves `value` from balance to reserved balance.
	///
	/// If the free balance is lower than `value`, then no funds will be moved and an `Err` will
	/// be returned to notify of this. This is different behavior than `unreserve`. 
fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; /// Moves up to `value` from reserved balance to free balance. This function cannot fail. /// /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` /// is less than `value`, then the remaining amount will be returned. /// /// # NOTES /// /// - This is different from `reserve`. /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will /// invoke `on_reserved_too_low` and could reap the account. fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; /// Moves up to `value` from reserved balance of account `slashed` to balance of account /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, /// depending on the `status`. /// /// As much funds up to `value` will be deducted as possible. If this is less than `value`, /// then `Ok(non_zero)` will be returned. fn repatriate_reserved( slashed: &AccountId, beneficiary: &AccountId, value: Self::Balance, status: BalanceStatus, ) -> result::Result<Self::Balance, DispatchError>; } /// An identifier for a lock. Used for disambiguating different locks so that /// they can be individually replaced or removed. pub type LockIdentifier = [u8; 8]; /// A currency whose accounts can have liquidity restrictions. pub trait LockableCurrency<AccountId>: Currency<AccountId> { /// The quantity used to denote time; usually just a `BlockNumber`. type Moment; /// The maximum number of locks a user should have on their account. type MaxLocks: Get<u32>; /// Create a new balance lock on account `who`. /// /// If the new lock is valid (i.e. not already expired), it will push the struct to /// the `Locks` vec in storage. Note that you can lock more funds than a user has. /// /// If the lock `id` already exists, this will update it. 
fn set_lock( id: LockIdentifier, who: &AccountId, amount: Self::Balance, reasons: WithdrawReasons, ); /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all /// parameters or creates a new one if it does not exist. /// /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it /// applies the most severe constraints of the two, while `set_lock` replaces the lock /// with the new parameters. As in, `extend_lock` will set: /// - maximum `amount` /// - bitwise mask of all `reasons` fn extend_lock( id: LockIdentifier, who: &AccountId, amount: Self::Balance, reasons: WithdrawReasons, ); /// Remove an existing lock. fn remove_lock( id: LockIdentifier, who: &AccountId, ); } /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. pub trait VestingSchedule<AccountId> { /// The quantity used to denote time; usually just a `BlockNumber`. type Moment; /// The currency that this schedule applies to. type Currency: Currency<AccountId>; /// Get the amount that is currently being vested and cannot be transferred out of this account. /// Returns `None` if the account has no vesting schedule. fn vesting_balance(who: &AccountId) -> Option<<Self::Currency as Currency<AccountId>>::Balance>; /// Adds a vesting schedule to a given account. /// /// If there already exists a vesting schedule for the given account, an `Err` is returned /// and nothing is updated. /// /// Is a no-op if the amount to be vested is zero. /// /// NOTE: This doesn't alter the free balance of the account. fn add_vesting_schedule( who: &AccountId, locked: <Self::Currency as Currency<AccountId>>::Balance, per_block: <Self::Currency as Currency<AccountId>>::Balance, starting_block: Self::Moment, ) -> DispatchResult; /// Remove a vesting schedule for a given account. /// /// NOTE: This doesn't alter the free balance of the account. fn remove_vesting_schedule(who: &AccountId); } bitflags! 
{ /// Reasons for moving funds out of an account. #[derive(Encode, Decode)] pub struct WithdrawReasons: i8 { /// In order to pay for (system) transaction costs. const TRANSACTION_PAYMENT = 0b00000001; /// In order to transfer ownership. const TRANSFER = 0b00000010; /// In order to reserve some funds for a later return or repatriation. const RESERVE = 0b00000100; /// In order to pay some other (higher-level) fees. const FEE = 0b00001000; /// In order to tip a validator for transaction inclusion. const TIP = 0b00010000; } } impl WithdrawReasons { /// Choose all variants except for `one`. /// /// ```rust /// # use frame_support::traits::WithdrawReasons; /// # fn main() { /// assert_eq!( /// WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP, /// WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), /// ); /// # } /// ``` pub fn except(one: WithdrawReasons) -> WithdrawReasons { let mut flags = Self::all(); flags.toggle(one); flags } } pub trait Time { type Moment: AtLeast32Bit + Parameter + Default + Copy; fn now() -> Self::Moment; } /// Trait to deal with unix time. pub trait UnixTime { /// Return duration since `SystemTime::UNIX_EPOCH`. fn now() -> core::time::Duration; } /// Trait for type that can handle incremental changes to a set of account IDs. pub trait ChangeMembers<AccountId: Clone + Ord> { /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The /// new set is given by `new`, and need not be sorted. /// /// This resets any previous value of prime. fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec<AccountId>) { new.sort(); Self::change_members_sorted(incoming, outgoing, &new[..]); } /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The /// new set is thus given by `sorted_new` and **must be sorted**. /// /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. 
/// /// This resets any previous value of prime. fn change_members_sorted( incoming: &[AccountId], outgoing: &[AccountId], sorted_new: &[AccountId], ); /// Set the new members; they **must already be sorted**. This will compute the diff and use it to /// call `change_members_sorted`. /// /// This resets any previous value of prime. fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members); Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); } /// Compute diff between new and old members; they **must already be sorted**. /// /// Returns incoming and outgoing members. fn compute_members_diff( new_members: &[AccountId], old_members: &[AccountId] ) -> (Vec<AccountId>, Vec<AccountId>) { let mut old_iter = old_members.iter(); let mut new_iter = new_members.iter(); let mut incoming = Vec::new(); let mut outgoing = Vec::new(); let mut old_i = old_iter.next(); let mut new_i = new_iter.next(); loop { match (old_i, new_i) { (None, None) => break, (Some(old), Some(new)) if old == new => { old_i = old_iter.next(); new_i = new_iter.next(); } (Some(old), Some(new)) if old < new => { outgoing.push(old.clone()); old_i = old_iter.next(); } (Some(old), None) => { outgoing.push(old.clone()); old_i = old_iter.next(); } (_, Some(new)) => { incoming.push(new.clone()); new_i = new_iter.next(); } } } (incoming, outgoing) } /// Set the prime member. fn set_prime(_prime: Option<AccountId>) {} } impl<T: Clone + Ord> ChangeMembers<T> for () { fn change_members(_: &[T], _: &[T], _: Vec<T>) {} fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} fn set_members_sorted(_: &[T], _: &[T]) {} fn set_prime(_: Option<T>) {} } /// Trait for type that can handle the initialization of account IDs at genesis. pub trait InitializeMembers<AccountId> { /// Initialize the members to the given `members`. 
fn initialize_members(members: &[AccountId]); } impl<T> InitializeMembers<T> for () { fn initialize_members(_: &[T]) {} } // A trait that is able to provide randomness. pub trait Randomness<Output> { /// Get a "random" value /// /// Being a deterministic blockchain, real randomness is difficult to come by. This gives you /// something that approximates it. At best, this will be randomness which was /// hard to predict a long time ago, but that has become easy to predict recently. /// /// `subject` is a context identifier and allows you to get a /// different result to other callers of this function; use it like /// `random(&b"my context"[..])`. fn random(subject: &[u8]) -> Output; /// Get the basic random seed. /// /// In general you won't want to use this, but rather `Self::random` which allows you to give a /// subject for the random result and whose value will be independently low-influence random /// from any other such seeds. fn random_seed() -> Output { Self::random(&[][..]) } } /// Provides an implementation of [`Randomness`] that should only be used in tests! pub struct TestRandomness; impl<Output: Decode + Default> Randomness<Output> for TestRandomness { fn random(subject: &[u8]) -> Output { Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default() } } /// Trait to be used by block producing consensus engine modules to determine /// how late the current block is (e.g. in a slot-based proposal mechanism how /// many slots were skipped since the previous block). pub trait Lateness<N> { /// Returns a generic measure of how late the current block is compared to /// its parent. fn lateness(&self) -> N; } impl<N: Zero> Lateness<N> for () { fn lateness(&self) -> N { Zero::zero() } } /// Implementors of this trait provide information about whether or not some validator has /// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. 
pub trait ValidatorRegistration<ValidatorId> { /// Returns true if the provided validator ID has been registered with the implementing runtime /// module fn is_registered(id: &ValidatorId) -> bool; } /// Provides information about the pallet setup in the runtime. /// /// An implementor should be able to provide information about each pallet that /// is configured in `construct_runtime!`. pub trait PalletInfo { /// Convert the given pallet `P` into its index as configured in the runtime. fn index<P: 'static>() -> Option<usize>; /// Convert the given pallet `P` into its name as configured in the runtime. fn name<P: 'static>() -> Option<&'static str>; } impl PalletInfo for () { fn index<P: 'static>() -> Option<usize> { Some(0) } fn name<P: 'static>() -> Option<&'static str> { Some("test") } } /// The function and pallet name of the Call. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] pub struct CallMetadata { /// Name of the function. pub function_name: &'static str, /// Name of the pallet to which the function belongs. pub pallet_name: &'static str, } /// Gets the function name of the Call. pub trait GetCallName { /// Return all function names. fn get_call_names() -> &'static [&'static str]; /// Return the function name of the Call. fn get_call_name(&self) -> &'static str; } /// Gets the metadata for the Call - function name and pallet name. pub trait GetCallMetadata { /// Return all module names. fn get_module_names() -> &'static [&'static str]; /// Return all function names for the given `module`. fn get_call_names(module: &str) -> &'static [&'static str]; /// Return a [`CallMetadata`], containing function and pallet name of the Call. fn get_call_metadata(&self) -> CallMetadata; } /// The block finalization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is ending. #[impl_for_tuples(30)] pub trait OnFinalize<BlockNumber> { /// The block is being finalized. Implement to have something happen. 
/// /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, /// including inherent extrinsics. fn on_finalize(_n: BlockNumber) {} } /// The block initialization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is /// beginning (right before the first extrinsic is executed). pub trait OnInitialize<BlockNumber> { /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. /// /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, /// including inherent extrinsics. Hence for instance, if you runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } } #[impl_for_tuples(30)] impl<BlockNumber: Clone> OnInitialize<BlockNumber> for Tuple { fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { let mut weight = 0; for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(_n.clone())); )* ); weight } } /// A trait that will be called at genesis. /// /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and /// will see the genesis state after all pallets have written their genesis state. #[impl_for_tuples(30)] pub trait OnGenesis { /// Something that should happen at genesis. fn on_genesis() {} } /// The runtime upgrade trait. /// /// Implementing this lets you express what should happen when the runtime upgrades, /// and changes may need to occur to your module. pub trait OnRuntimeUpgrade { /// Perform a module upgrade. /// /// # Warning /// /// This function will be called before we initialized any runtime state, aka `on_initialize` /// wasn't called yet. So, information like the block number and any other /// block local data are not accessible. 
/// /// Return the non-negotiable weight consumed for runtime upgrade. fn on_runtime_upgrade() -> crate::weights::Weight { 0 } } #[impl_for_tuples(30)] impl OnRuntimeUpgrade for Tuple { fn on_runtime_upgrade() -> crate::weights::Weight { let mut weight = 0; for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } } /// Off-chain computation trait. /// /// Implementing this trait on a module allows you to perform long-running tasks /// that make (by default) validators generate transactions that feed results /// of those long-running computations back on chain. /// /// NOTE: This function runs off-chain, so it can access the block state, /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. #[impl_for_tuples(30)] pub trait OffchainWorker<BlockNumber> { /// This function is being called after every block import (when fully synced). /// /// Implement this and use any of the `Offchain` `sp_io` set of APIs /// to perform off-chain computations, calls and submit transactions /// with results to trigger any on-chain changes. /// Any state alterations are lost and are not persisted. fn offchain_worker(_n: BlockNumber) {} } pub mod schedule { use super::*; /// Information relating to the period of a scheduled task. First item is the length of the /// period and the second is the number of times it should be executed in total before the task /// is considered finished and removed. pub type Period<BlockNumber> = (BlockNumber, u32); /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning /// higher priority. pub type Priority = u8; /// The dispatch time of a scheduled task. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub enum DispatchTime<BlockNumber> { /// At specified block. At(BlockNumber), /// After specified number of blocks. 
After(BlockNumber), } /// The highest priority. We invert the value so that normal sorting will place the highest /// priority at the beginning of the list. pub const HIGHEST_PRIORITY: Priority = 0; /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even /// if it breaches the `MaximumWeight` limitation. pub const HARD_DEADLINE: Priority = 63; /// The lowest priority. Most stuff should be around here. pub const LOWEST_PRIORITY: Priority = 255; /// A type that can be used as a scheduler. pub trait Anon<BlockNumber, Call, Origin> { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug; /// Schedule a dispatch to happen at the beginning of some block in the future. /// /// This is not named. fn schedule( when: DispatchTime<BlockNumber>, maybe_periodic: Option<Period<BlockNumber>>, priority: Priority, origin: Origin, call: Call ) -> Result<Self::Address, DispatchError>; /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, /// also. /// /// Will return an error if the `address` is invalid. /// /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. /// /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For /// that, you must name the task explicitly using the `Named` trait. fn cancel(address: Self::Address) -> Result<(), ()>; /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed /// only if it is executed *before* the currently scheduled block. For periodic tasks, /// this dispatch is guaranteed to succeed only before the *initial* execution; for /// others, use `reschedule_named`. /// /// Will return an error if the `address` is invalid. 
fn reschedule( address: Self::Address, when: DispatchTime<BlockNumber>, ) -> Result<Self::Address, DispatchError>; /// Return the next dispatch time for a given task. /// /// Will return an error if the `address` is invalid. fn next_dispatch_time(address: Self::Address) -> Result<BlockNumber, ()>; } /// A type that can be used as a scheduler. pub trait Named<BlockNumber, Call, Origin> { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; /// Schedule a dispatch to happen at the beginning of some block in the future. /// /// - `id`: The identity of the task. This must be unique and will return an error if not. fn schedule_named( id: Vec<u8>, when: DispatchTime<BlockNumber>, maybe_periodic: Option<Period<BlockNumber>>, priority: Priority, origin: Origin, call: Call ) -> Result<Self::Address, ()>; /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances /// of that, also. /// /// Will return an error if the `id` is invalid. /// /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. fn cancel_named(id: Vec<u8>) -> Result<(), ()>; /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed /// only if it is executed *before* the currently scheduled block. fn reschedule_named( id: Vec<u8>, when: DispatchTime<BlockNumber>, ) -> Result<Self::Address, DispatchError>; /// Return the next dispatch time for a given task. /// /// Will return an error if the `id` is invalid. fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumber, ()>; } } /// Some sort of check on the origin is performed by this object. pub trait EnsureOrigin<OuterOrigin> { /// A return type. type Success; /// Perform the origin check. 
fn ensure_origin(o: OuterOrigin) -> result::Result<Self::Success, BadOrigin> { Self::try_origin(o).map_err(|_| BadOrigin) } /// Perform the origin check. fn try_origin(o: OuterOrigin) -> result::Result<Self::Success, OuterOrigin>; /// Returns an outer origin capable of passing `try_origin` check. /// /// ** Should be used for benchmarking only!!! ** #[cfg(feature = "runtime-benchmarks")] fn successful_origin() -> OuterOrigin; } /// Type that can be dispatched with an origin but without checking the origin filter. /// /// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by /// `construct_runtime` and `impl_outer_dispatch`. pub trait UnfilteredDispatchable { /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). type Origin; /// Dispatch this call but do not check the filter in origin. fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo; } /// Methods available on `frame_system::Config::Origin`. pub trait OriginTrait: Sized { /// Runtime call type, as in `frame_system::Config::Call` type Call; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin; /// The AccountId used across the system. type AccountId; /// Add a filter to the origin. fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. fn reset_filter(&mut self); /// Replace the caller with caller from the other origin fn set_caller_from(&mut self, other: impl Into<Self>); /// Filter the call, if false then call is filtered out. fn filter_call(&self, call: &Self::Call) -> bool; /// Get the caller. fn caller(&self) -> &Self::PalletsOrigin; /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self; /// Create with system root origin and no filter. 
fn root() -> Self; /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. fn signed(by: Self::AccountId) -> Self; } /// Trait to be used when types are exactly same. /// /// This allow to convert back and forth from type, a reference and a mutable reference. pub trait IsType<T>: Into<T> + From<T> { /// Cast reference. fn from_ref(t: &T) -> &Self; /// Cast reference. fn into_ref(&self) -> &T; /// Cast mutable reference. fn from_mut(t: &mut T) -> &mut Self; /// Cast mutable reference. fn into_mut(&mut self) -> &mut T; } impl<T> IsType<T> for T { fn from_ref(t: &T) -> &Self { t } fn into_ref(&self) -> &T { self } fn from_mut(t: &mut T) -> &mut Self { t } fn into_mut(&mut self) -> &mut T { self } } /// An instance of a pallet in the storage. /// /// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! /// /// E.g. for module MyModule default instance will have prefix "MyModule" and other instances /// "InstanceNMyModule". pub trait Instance: 'static { /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" const PREFIX: &'static str; } /// An instance of a storage in a pallet. /// /// Define an instance for an individual storage inside a pallet. /// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is /// used to isolate storages inside a pallet. /// /// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which /// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` pub trait StorageInstance { /// Prefix of a pallet to isolate it from other pallets. fn pallet_prefix() -> &'static str; /// Prefix given to a storage to isolate from other storages in the pallet. const STORAGE_PREFIX: &'static str; } /// Implement Get by returning Default for any type that implements Default. 
pub struct GetDefault; impl<T: Default> crate::traits::Get<T> for GetDefault { fn get() -> T { T::default() } } /// A trait similar to `Convert` to convert values from `B` an abstract balance type /// into u64 and back from u128. (This conversion is used in election and other places where complex /// calculation over balance type is needed) /// /// Total issuance of the currency is passed in, but an implementation of this trait may or may not /// use it. /// /// # WARNING /// /// the total issuance being passed in implies that the implementation must be aware of the fact /// that its values can affect the outcome. This implies that if the vote value is dependent on the /// total issuance, it should never ber written to storage for later re-use. pub trait CurrencyToVote<B> { /// Convert balance to u64. fn to_vote(value: B, issuance: B) -> u64; /// Convert u128 to balance. fn to_currency(value: u128, issuance: B) -> B; } /// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. /// /// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the /// important cases: /// /// If the chain's total issuance is less than u64::max(), this will always be 1, which means that /// the factor will not have any effect. In this case, any account's balance is also less. Thus, /// both of the conversions are basically an `as`; Any balance can fit in u64. /// /// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and /// divided upon conversion. 
pub struct U128CurrencyToVote; impl U128CurrencyToVote { fn factor(issuance: u128) -> u128 { (issuance / u64::max_value() as u128).max(1) } } impl CurrencyToVote<u128> for U128CurrencyToVote { fn to_vote(value: u128, issuance: u128) -> u64 { (value / Self::factor(issuance)).saturated_into() } fn to_currency(value: u128, issuance: u128) -> u128 { value.saturating_mul(Self::factor(issuance)) } } /// A naive implementation of `CurrencyConvert` that simply saturates all conversions. /// /// # Warning /// /// This is designed to be used mostly for testing. Use with care, and think about the consequences. pub struct SaturatingCurrencyToVote; impl<B: UniqueSaturatedInto<u64> + UniqueSaturatedFrom<u128>> CurrencyToVote<B> for SaturatingCurrencyToVote { fn to_vote(value: B, _: B) -> u64 { value.unique_saturated_into() } fn to_currency(value: u128, _: B) -> B { B::unique_saturated_from(value) } } /// Something that can be checked to be a of sub type `T`. /// /// This is useful for enums where each variant encapsulates a different sub type, and /// you need access to these sub types. /// /// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this /// to check if a certain call is an instance of the local pallet's `Call` enum. 
/// /// # Example /// /// ``` /// # use frame_support::traits::IsSubType; /// /// enum Test { /// String(String), /// U32(u32), /// } /// /// impl IsSubType<String> for Test { /// fn is_sub_type(&self) -> Option<&String> { /// match self { /// Self::String(ref r) => Some(r), /// _ => None, /// } /// } /// } /// /// impl IsSubType<u32> for Test { /// fn is_sub_type(&self) -> Option<&u32> { /// match self { /// Self::U32(ref r) => Some(r), /// _ => None, /// } /// } /// } /// /// fn main() { /// let data = Test::String("test".into()); /// /// assert_eq!("test", IsSubType::<String>::is_sub_type(&data).unwrap().as_str()); /// } /// ``` pub trait IsSubType<T> { /// Returns `Some(_)` if `self` is an instance of sub type `T`. fn is_sub_type(&self) -> Option<&T>; } /// The pallet hooks trait. Implementing this lets you express some logic to execute. pub trait Hooks<BlockNumber> { /// The block is being finalized. Implement to have something happen. fn on_finalize(_n: BlockNumber) {} /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } /// Perform a module upgrade. /// /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it /// doesn't include the write of the pallet version in storage. The final complete logic /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by /// `Pallet`. /// /// # Warning /// /// This function will be called before we initialized any runtime state, aka `on_initialize` /// wasn't called yet. So, information like the block number and any other /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. 
fn on_runtime_upgrade() -> crate::weights::Weight { 0 } /// Implementing this function on a module allows you to perform long-running tasks /// that make (by default) validators generate transactions that feed results /// of those long-running computations back on chain. /// /// NOTE: This function runs off-chain, so it can access the block state, /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. /// /// This function is being called after every block import (when fully synced). /// /// Implement this and use any of the `Offchain` `sp_io` set of APIs /// to perform off-chain computations, calls and submit transactions /// with results to trigger any on-chain changes. /// Any state alterations are lost and are not persisted. fn offchain_worker(_n: BlockNumber) {} /// Run integrity test. /// /// The test is not executed in a externalities provided environment. fn integrity_test() {} } /// A trait to define the build function of a genesis config, T and I are placeholder for pallet /// trait and pallet instance. #[cfg(feature = "std")] pub trait GenesisBuild<T, I=()>: Default + MaybeSerializeDeserialize { /// The build function is called within an externalities allowing storage APIs. /// Thus one can write to storage using regular pallet storages. fn build(&self); /// Build the storage using `build` inside default storage. fn build_storage(&self) -> Result<sp_runtime::Storage, String> { let mut storage = Default::default(); self.assimilate_storage(&mut storage)?; Ok(storage) } /// Assimilate the storage for this module into pre-existing overlays. fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { sp_state_machine::BasicExternalities::execute_with_storage(storage, || { self.build(); Ok(()) }) } } /// The storage key postfix that is used to store the [`PalletVersion`] per pallet. 
/// /// The full storage key is built by using: /// Twox128([`PalletInfo::name`]) ++ Twox128([`PALLET_VERSION_STORAGE_KEY_POSTFIX`]) pub const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; /// The version of a pallet. /// /// Each pallet version is stored in the state under a fixed key. See /// [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. #[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy)] pub struct PalletVersion { /// The major version of the pallet. pub major: u16, /// The minor version of the pallet. pub minor: u8, /// The patch version of the pallet. pub patch: u8, } impl PalletVersion { /// Creates a new instance of `Self`. pub fn new(major: u16, minor: u8, patch: u8) -> Self { Self { major, minor, patch, } } /// Returns the storage key for a pallet version. /// /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. /// /// Returns `None` if the given `PI` returned a `None` as name for the given /// `Pallet`. pub fn storage_key<PI: PalletInfo, Pallet: 'static>() -> Option<[u8; 32]> { let pallet_name = PI::name::<Pallet>()?; let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); let mut final_key = [0u8; 32]; final_key[..16].copy_from_slice(&pallet_name); final_key[16..].copy_from_slice(&postfix); Some(final_key) } /// Put this pallet version into the storage. /// /// It will use the storage key that is associated with the given `Pallet`. /// /// # Panics /// /// This function will panic iff `Pallet` can not be found by `PalletInfo`. /// In a runtime that is put together using /// [`construct_runtime!`](crate::construct_runtime) this should never happen. /// /// It will also panic if this function isn't executed in an externalities /// provided environment. 
pub fn put_into_storage<PI: PalletInfo, Pallet: 'static>(&self) { let key = Self::storage_key::<PI, Pallet>() .expect("Every active pallet has a name in the runtime; qed"); crate::storage::unhashed::put(&key, self); } } impl sp_std::cmp::PartialOrd for PalletVersion { fn partial_cmp(&self, other: &Self) -> Option<sp_std::cmp::Ordering> { let res = self.major .cmp(&other.major) .then_with(|| self.minor .cmp(&other.minor) .then_with(|| self.patch.cmp(&other.patch) )); Some(res) } } /// Provides version information about a pallet. /// /// This trait provides two functions for returning the version of a /// pallet. There is a state where both functions can return distinct versions. /// See [`GetPalletVersion::storage_version`] for more information about this. pub trait GetPalletVersion { /// Returns the current version of the pallet. fn current_version() -> PalletVersion; /// Returns the version of the pallet that is stored in storage. /// /// Most of the time this will return the exact same version as /// [`GetPalletVersion::current_version`]. Only when being in /// a state after a runtime upgrade happened and the pallet did /// not yet updated its version in storage, this will return a /// different(the previous, seen from the time of calling) version. /// /// See [`PalletVersion`] for more information. /// /// # Note /// /// If there was no previous version of the pallet stored in the state, /// this function returns `None`. 
fn storage_version() -> Option<PalletVersion>; } #[cfg(test)] mod tests { use super::*; #[test] fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { struct Test; impl OnInitialize<u8> for Test { fn on_initialize(_n: u8) -> crate::weights::Weight { 10 } } impl OnRuntimeUpgrade for Test { fn on_runtime_upgrade() -> crate::weights::Weight { 20 } } assert_eq!(<(Test, Test)>::on_initialize(0), 20); assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); } #[test] fn check_pallet_version_ordering() { let version = PalletVersion::new(1, 0, 0); assert!(version > PalletVersion::new(0, 1, 2)); assert!(version == PalletVersion::new(1, 0, 0)); assert!(version < PalletVersion::new(1, 0, 1)); assert!(version < PalletVersion::new(1, 1, 0)); let version = PalletVersion::new(2, 50, 50); assert!(version < PalletVersion::new(2, 50, 51)); assert!(version > PalletVersion::new(2, 49, 51)); assert!(version < PalletVersion::new(3, 49, 51)); } }
34.773206
113
0.69276
5b86ad105fbaa4340ceb21b3ce243dc4f5418f96
546
#[doc = "Reader of register OUTR"] pub type R = crate::R<u32, super::OUTR>; #[doc = "Reader of field `DSR`"] pub type DSR_R = crate::R<u16, u16>; #[doc = "Reader of field `RCI`"] pub type RCI_R = crate::R<u8, u8>; impl R { #[doc = "Bits 0:15 - Received Data"] #[inline(always)] pub fn dsr(&self) -> DSR_R { DSR_R::new((self.bits & 0xffff) as u16) } #[doc = "Bits 16:20 - Receiver Control Information"] #[inline(always)] pub fn rci(&self) -> RCI_R { RCI_R::new(((self.bits >> 16) & 0x1f) as u8) } }
28.736842
56
0.564103
4b89904bc6fbfa3f34c5ce55b71e2057e1cb817f
1,392
use crate::*; /// A simple blocking waiter used by the generated bindings and should not be used directly. pub struct Waiter(RawPtr); pub struct WaiterSignaler(RawPtr); impl Waiter { pub fn new() -> (Waiter, WaiterSignaler) { unsafe { let handle = CreateEventW(std::ptr::null_mut(), 1, 0, std::ptr::null_mut()); (Waiter(handle), WaiterSignaler(handle)) } } } impl WaiterSignaler { /// Signals the `Waiter`. This is unsafe because the lifetime of `WaiterSignaler` is not tied /// to the lifetime of the `Waiter`. This is not possible in this case because the `Waiter` /// is used to signal a WinRT async completion and the compiler doesn't know that the lifetime /// of the delegate is bounded by the calling function. pub unsafe fn signal(&self) { // https://github.com/microsoft/winrt-rs/pull/374#discussion_r535313344 SetEvent(self.0); } } impl Drop for Waiter { fn drop(&mut self) { unsafe { WaitForSingleObject(self.0, 0xFFFFFFFF); CloseHandle(self.0); } } } #[link(name = "kernel32")] extern "system" { fn CreateEventW(security: RawPtr, manual: i32, state: i32, name: RawPtr) -> RawPtr; fn SetEvent(handle: RawPtr) -> i32; fn WaitForSingleObject(handle: RawPtr, milliseconds: u32) -> u32; fn CloseHandle(handle: RawPtr) -> i32; }
32.372093
98
0.64727
ffa6530feede7f4307d821a83be84b59a0e74115
1,913
use std::io::Cursor; use std::sync::Arc; use arrow2::array::Array; use arrow2::chunk::Chunk; use arrow2::datatypes::Schema; use arrow2::error::Result; use arrow2::io::ipc::read; use arrow2::io::ipc::write::file_async::FileSink; use arrow2::io::ipc::write::WriteOptions; use arrow2::io::ipc::IpcField; use futures::io::Cursor as AsyncCursor; use futures::SinkExt; use crate::io::ipc::common::read_arrow_stream; use crate::io::ipc::common::read_gzip_json; async fn write_( schema: &Schema, ipc_fields: &[IpcField], batches: &[Chunk<Arc<dyn Array>>], ) -> Result<Vec<u8>> { let mut result = AsyncCursor::new(vec![]); let options = WriteOptions { compression: None }; let mut sink = FileSink::new(&mut result, schema, Some(ipc_fields.to_vec()), options); for batch in batches { sink.feed((batch, Some(ipc_fields)).into()).await?; } sink.close().await?; drop(sink); Ok(result.into_inner()) } async fn test_file(version: &str, file_name: &str) -> Result<()> { let (schema, ipc_fields, batches) = read_arrow_stream(version, file_name); let result = write_(&schema, &ipc_fields, &batches).await?; let mut reader = Cursor::new(result); let metadata = read::read_file_metadata(&mut reader)?; let reader = read::FileReader::new(reader, metadata, None); let schema = &reader.metadata().schema; let ipc_fields = reader.metadata().ipc_schema.fields.clone(); // read expected JSON output let (expected_schema, expected_ipc_fields, expected_batches) = read_gzip_json(version, file_name).unwrap(); assert_eq!(schema, &expected_schema); assert_eq!(ipc_fields, expected_ipc_fields); let batches = reader.collect::<Result<Vec<_>>>()?; assert_eq!(batches, expected_batches); Ok(()) } #[tokio::test] async fn write_async() -> Result<()> { test_file("1.0.0-littleendian", "generated_primitive").await }
29.890625
90
0.67747
3922a894069e4c3cdeb1ed8840d1382d20fca77a
12,006
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use crate::core::feature::{Feature, FeatureAttrValType}; use crate::core::layer::Layer; use crate::core::screen; use crate::core::{geom, geom::GeometryType}; use crate::mvt::geom_encoder::{CommandSequence, EncodableGeom}; use crate::mvt::vector_tile; use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use protobuf::{error::ProtobufError, stream::CodedOutputStream, Message}; use std::fs::File; use std::io::{BufReader, Read, Write}; use tile_grid::Extent; pub struct Tile<'a> { pub mvt_tile: vector_tile::Tile, extent: &'a Extent, reverse_y: bool, } impl GeometryType { /// GeometryType to MVT geom type pub fn mvt_field_type(&self) -> vector_tile::Tile_GeomType { match self { &GeometryType::Point(_) => vector_tile::Tile_GeomType::POINT, &GeometryType::LineString(_) => vector_tile::Tile_GeomType::LINESTRING, &GeometryType::Polygon(_) => vector_tile::Tile_GeomType::POLYGON, &GeometryType::MultiPoint(_) => vector_tile::Tile_GeomType::POINT, &GeometryType::MultiLineString(_) => vector_tile::Tile_GeomType::LINESTRING, &GeometryType::MultiPolygon(_) => vector_tile::Tile_GeomType::POLYGON, &GeometryType::GeometryCollection(_) => vector_tile::Tile_GeomType::UNKNOWN, } } } pub trait ScreenGeom<T> { /// Convert geometry into screen coordinates fn from_geom(extent: &Extent, reverse_y: bool, tile_size: u32, geom: &T) -> Self; } impl ScreenGeom<geom::Point> for screen::Point { fn from_geom(extent: &Extent, reverse_y: bool, tile_size: u32, point: &geom::Point) -> Self { let x_span = extent.maxx - extent.minx; let y_span = extent.maxy - extent.miny; let mut screen_geom = screen::Point { x: ((point.x - extent.minx) * tile_size as f64 / x_span) as i32, y: ((point.y - extent.miny) * tile_size as f64 / y_span) as i32, }; if reverse_y { screen_geom.y = (tile_size as i32).saturating_sub(screen_geom.y) }; screen_geom } } 
impl ScreenGeom<geom::MultiPoint> for screen::MultiPoint { fn from_geom( extent: &Extent, reverse_y: bool, tile_size: u32, multipoint: &geom::MultiPoint, ) -> Self { let mut screen_geom = screen::MultiPoint { points: Vec::new() }; for point in &multipoint.points { screen_geom.points.push(screen::Point::from_geom( extent, reverse_y, tile_size, point, )); } screen_geom } } impl ScreenGeom<geom::LineString> for screen::LineString { fn from_geom( extent: &Extent, reverse_y: bool, tile_size: u32, line: &geom::LineString, ) -> Self { let mut screen_geom = screen::LineString { points: Vec::new() }; for point in &line.points { screen_geom.points.push(screen::Point::from_geom( extent, reverse_y, tile_size, point, )); } screen_geom.points.dedup(); screen_geom } } impl ScreenGeom<geom::MultiLineString> for screen::MultiLineString { fn from_geom( extent: &Extent, reverse_y: bool, tile_size: u32, multiline: &geom::MultiLineString, ) -> Self { let mut screen_geom = screen::MultiLineString { lines: Vec::new() }; for line in &multiline.lines { screen_geom.lines.push(screen::LineString::from_geom( extent, reverse_y, tile_size, line, )); } screen_geom } } impl ScreenGeom<geom::Polygon> for screen::Polygon { fn from_geom( extent: &Extent, reverse_y: bool, tile_size: u32, polygon: &geom::Polygon, ) -> Self { let mut screen_geom = screen::Polygon { rings: Vec::new() }; for line in &polygon.rings { screen_geom.rings.push(screen::LineString::from_geom( extent, reverse_y, tile_size, line, )); } screen_geom } } impl ScreenGeom<geom::MultiPolygon> for screen::MultiPolygon { fn from_geom( extent: &Extent, reverse_y: bool, tile_size: u32, multipolygon: &geom::MultiPolygon, ) -> Self { let mut screen_geom = screen::MultiPolygon { polygons: Vec::new(), }; for polygon in &multipolygon.polygons { screen_geom.polygons.push(screen::Polygon::from_geom( extent, reverse_y, tile_size, polygon, )); } screen_geom } } // --- Tile creation functions impl<'a> Tile<'a> { pub fn new(extent: &Extent, 
reverse_y: bool) -> Tile { let mvt_tile = vector_tile::Tile::new(); Tile { mvt_tile: mvt_tile, extent: extent, reverse_y: reverse_y, } } pub fn new_layer(&mut self, layer: &Layer) -> vector_tile::Tile_Layer { let mut mvt_layer = vector_tile::Tile_Layer::new(); mvt_layer.set_version(2); mvt_layer.set_name(layer.name.clone()); mvt_layer.set_extent(layer.tile_size); mvt_layer } pub fn encode_geom(&self, geom: geom::GeometryType, tile_size: u32) -> CommandSequence { match geom { GeometryType::Point(ref g) => { screen::Point::from_geom(&self.extent, self.reverse_y, tile_size, g).encode() } GeometryType::MultiPoint(ref g) => { screen::MultiPoint::from_geom(&self.extent, self.reverse_y, tile_size, g).encode() } GeometryType::LineString(ref g) => { screen::LineString::from_geom(&self.extent, self.reverse_y, tile_size, g).encode() } GeometryType::MultiLineString(ref g) => { screen::MultiLineString::from_geom(&self.extent, self.reverse_y, tile_size, g) .encode() } GeometryType::Polygon(ref g) => { screen::Polygon::from_geom(&self.extent, self.reverse_y, tile_size, g).encode() } GeometryType::MultiPolygon(ref g) => { screen::MultiPolygon::from_geom(&self.extent, self.reverse_y, tile_size, g).encode() } GeometryType::GeometryCollection(_) => panic!("GeometryCollection not supported"), } } pub fn add_feature_attribute( mvt_layer: &mut vector_tile::Tile_Layer, mvt_feature: &mut vector_tile::Tile_Feature, key: String, mvt_value: vector_tile::Tile_Value, ) { let keyentry = mvt_layer.get_keys().iter().position(|k| *k == key); // Optimization: maintain a hash table with key/index pairs let keyidx = match keyentry { None => { mvt_layer.mut_keys().push(key); mvt_layer.get_keys().len() - 1 } Some(idx) => idx, }; mvt_feature.mut_tags().push(keyidx as u32); let valentry = mvt_layer.get_values().iter().position(|v| *v == mvt_value); // Optimization: maintain a hash table with value/index pairs let validx = match valentry { None => { mvt_layer.mut_values().push(mvt_value); 
mvt_layer.get_values().len() - 1 } Some(idx) => idx, }; mvt_feature.mut_tags().push(validx as u32); } pub fn add_feature(&self, mut mvt_layer: &mut vector_tile::Tile_Layer, feature: &dyn Feature) { let mut mvt_feature = vector_tile::Tile_Feature::new(); if let Some(fid) = feature.fid() { mvt_feature.set_id(fid); } 'attr: for attr in feature.attributes() { let mut mvt_value = vector_tile::Tile_Value::new(); match attr.value { FeatureAttrValType::String(ref v) => { mvt_value.set_string_value(v.clone()); } FeatureAttrValType::Double(v) => { mvt_value.set_double_value(v); } FeatureAttrValType::Float(v) => { mvt_value.set_float_value(v); } FeatureAttrValType::Int(v) => { mvt_value.set_int_value(v); } FeatureAttrValType::UInt(v) => { mvt_value.set_uint_value(v); } FeatureAttrValType::SInt(v) => { mvt_value.set_sint_value(v); } FeatureAttrValType::Bool(v) => { mvt_value.set_bool_value(v); } FeatureAttrValType::VarcharArray(v) => { for array_val in v { Tile::add_feature_attribute( &mut mvt_layer, &mut mvt_feature, format!("{}.{}", attr.key.clone(), array_val), mvt_value.clone(), ); } continue 'attr; } } Tile::add_feature_attribute( &mut mvt_layer, &mut mvt_feature, attr.key.clone(), mvt_value, ); } if let Ok(geom) = feature.geometry() { let g_type = geom.mvt_field_type(); let enc_geom = self.encode_geom(geom, mvt_layer.get_extent()).vec(); if !enc_geom.is_empty() { mvt_feature.set_field_type(g_type); mvt_feature.set_geometry(enc_geom); mvt_layer.mut_features().push(mvt_feature); } } } pub fn add_layer(&mut self, mvt_layer: vector_tile::Tile_Layer) { self.mvt_tile.mut_layers().push(mvt_layer); } pub fn write_to(mut out: &mut dyn Write, mvt_tile: &vector_tile::Tile) { let mut os = CodedOutputStream::new(&mut out); let _ = mvt_tile.write_to(&mut os); os.flush().unwrap(); } pub fn write_gz_to(out: &mut dyn Write, mvt_tile: &vector_tile::Tile) { let mut gz = GzEncoder::new(out, Compression::default()); { let mut os = CodedOutputStream::new(&mut gz); let _ = 
mvt_tile.write_to(&mut os); os.flush().unwrap(); } let _ = gz.finish(); } pub fn read_from(fin: &mut dyn Read) -> Result<vector_tile::Tile, ProtobufError> { let mut reader = BufReader::new(fin); vector_tile::Tile::parse_from_reader(&mut reader) } pub fn read_gz_from(fin: &mut dyn Read) -> Result<vector_tile::Tile, ProtobufError> { let gz = GzDecoder::new(fin); let mut reader = BufReader::new(gz); vector_tile::Tile::parse_from_reader(&mut reader) } pub fn tile_bytevec(mvt_tile: &vector_tile::Tile) -> Vec<u8> { let mut v = Vec::with_capacity(mvt_tile.compute_size() as usize); Self::write_to(&mut v, mvt_tile); v } pub fn tile_bytevec_gz(mvt_tile: &vector_tile::Tile) -> Vec<u8> { let mut v = Vec::with_capacity(mvt_tile.compute_size() as usize); Self::write_gz_to(&mut v, &mvt_tile); v } pub fn tile_content(tilegz: Vec<u8>, gzip: bool) -> Vec<u8> { if gzip { tilegz } else { let mut gz = GzDecoder::new(&tilegz[..]); let mut unc_tile = Vec::with_capacity(tilegz.len()); let _ = gz.read_to_end(&mut unc_tile); unc_tile } } pub fn to_file(&self, fname: &str) { let mut f = File::create(fname).unwrap(); Self::write_to(&mut f, &self.mvt_tile); } pub fn size(mvt_tile: &vector_tile::Tile) -> u32 { mvt_tile.compute_size() } }
35.002915
101
0.558721
4abde1b8550896bf8e57f4a1be946b048f3780c6
21,985
//! Javascript context. pub mod intrinsics; use intrinsics::{IntrinsicObjects, Intrinsics}; use crate::{ builtins::{self, function::NativeFunctionSignature}, bytecompiler::ByteCompiler, class::{Class, ClassBuilder}, object::{FunctionBuilder, GlobalPropertyMap, JsObject, ObjectData}, property::{Attribute, PropertyDescriptor, PropertyKey}, realm::Realm, syntax::{ast::node::StatementList, parser::ParseError, Parser}, vm::{CallFrame, CodeBlock, FinallyReturn, GeneratorResumeKind, Vm}, JsResult, JsValue, }; use boa_gc::Gc; use boa_interner::{Interner, Sym}; use boa_profiler::Profiler; #[cfg(feature = "console")] use crate::builtins::console::Console; /// Javascript context. It is the primary way to interact with the runtime. /// /// `Context`s constructed in a thread share the same runtime, therefore it /// is possible to share objects from one context to another context, but they /// have to be in the same thread. /// /// # Examples /// /// ## Execute Function of Script File /// /// ```rust /// use boa_engine::{ /// Context, /// object::ObjectInitializer, /// property::{Attribute, PropertyDescriptor} /// }; /// /// let script = r#" /// function test(arg1) { /// if(arg1 != null) { /// return arg1.x; /// } /// return 112233; /// } /// "#; /// /// let mut context = Context::default(); /// /// // Populate the script definition to the context. /// context.eval(script).unwrap(); /// /// // Create an object that can be used in eval calls. /// let arg = ObjectInitializer::new(&mut context) /// .property("x", 12, Attribute::READONLY) /// .build(); /// context.register_global_property( /// "arg", /// arg, /// Attribute::all() /// ); /// /// let value = context.eval("test(arg)").unwrap(); /// /// assert_eq!(value.as_number(), Some(12.0)) /// ``` #[derive(Debug)] pub struct Context { /// realm holds both the global object and the environment pub(crate) realm: Realm, /// String interner in the context. interner: Interner, /// console object state. 
#[cfg(feature = "console")] console: Console, /// Intrinsic objects intrinsics: Intrinsics, /// Whether or not global strict mode is active. strict: bool, pub(crate) vm: Vm, } impl Default for Context { fn default() -> Self { let mut context = Self { realm: Realm::create(), interner: Interner::default(), #[cfg(feature = "console")] console: Console::default(), intrinsics: Intrinsics::default(), strict: false, vm: Vm { frame: None, stack: Vec::with_capacity(1024), trace: false, stack_size_limit: 1024, }, }; // Add new builtIns to Context Realm // At a later date this can be removed from here and called explicitly, // but for now we almost always want these default builtins context.intrinsics.objects = IntrinsicObjects::init(&mut context); context.create_intrinsics(); context } } impl Context { /// Create a new `Context`. #[inline] pub fn new(interner: Interner) -> Self { Self { interner, ..Self::default() } } /// Gets the string interner. #[inline] pub fn interner(&self) -> &Interner { &self.interner } /// Gets a mutable reference to the string interner. #[inline] pub fn interner_mut(&mut self) -> &mut Interner { &mut self.interner } /// A helper function for getting an immutable reference to the `console` object. #[cfg(feature = "console")] pub(crate) fn console(&self) -> &Console { &self.console } /// A helper function for getting a mutable reference to the `console` object. #[cfg(feature = "console")] #[inline] pub(crate) fn console_mut(&mut self) -> &mut Console { &mut self.console } /// Returns if strict mode is currently active. #[inline] pub fn strict(&self) -> bool { self.strict } /// Set the global strict mode of the context. 
#[inline] pub fn set_strict_mode(&mut self, strict: bool) { self.strict = strict; } /// Sets up the default global objects within Global #[inline] fn create_intrinsics(&mut self) { let _timer = Profiler::global().start_event("create_intrinsics", "interpreter"); // Create intrinsics, add global objects here builtins::init(self); } /// Constructs an object with the `%Object.prototype%` prototype. #[inline] pub fn construct_object(&self) -> JsObject { JsObject::from_proto_and_data( self.intrinsics().constructors().object().prototype(), ObjectData::ordinary(), ) } pub fn parse<S>(&mut self, src: S) -> Result<StatementList, ParseError> where S: AsRef<[u8]>, { Parser::new(src.as_ref(), self.strict).parse_all(&mut self.interner) } /// <https://tc39.es/ecma262/#sec-call> #[inline] pub(crate) fn call( &mut self, f: &JsValue, this: &JsValue, args: &[JsValue], ) -> JsResult<JsValue> { f.as_callable() .ok_or_else(|| self.construct_type_error("Value is not callable")) .and_then(|obj| obj.call(this, args, self)) } /// Return the global object. #[inline] pub fn global_object(&self) -> &JsObject { self.realm.global_object() } /// Return a reference to the global object string bindings. #[inline] pub(crate) fn global_bindings(&self) -> &GlobalPropertyMap { self.realm.global_bindings() } /// Return a mutable reference to the global object string bindings. #[inline] pub(crate) fn global_bindings_mut(&mut self) -> &mut GlobalPropertyMap { self.realm.global_bindings_mut() } /// Constructs a `Error` with the specified message. #[inline] pub fn construct_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::Error::constructor( &self .intrinsics() .constructors() .error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `Error` with the specified message. 
#[inline] pub fn throw_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_error(message)) } /// Constructs a `RangeError` with the specified message. #[inline] pub fn construct_range_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::RangeError::constructor( &self .intrinsics() .constructors() .range_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `RangeError` with the specified message. #[inline] pub fn throw_range_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_range_error(message)) } /// Constructs a `TypeError` with the specified message. #[inline] pub fn construct_type_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::TypeError::constructor( &self .intrinsics() .constructors() .type_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `TypeError` with the specified message. #[inline] pub fn throw_type_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_type_error(message)) } /// Constructs a `ReferenceError` with the specified message. #[inline] pub fn construct_reference_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::ReferenceError::constructor( &self .intrinsics() .constructors() .reference_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `ReferenceError` with the specified message. #[inline] pub fn throw_reference_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_reference_error(message)) } /// Constructs a `SyntaxError` with the specified message. 
#[inline] pub fn construct_syntax_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::SyntaxError::constructor( &self .intrinsics() .constructors() .syntax_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `SyntaxError` with the specified message. #[inline] pub fn throw_syntax_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_syntax_error(message)) } /// Constructs a `EvalError` with the specified message. pub fn construct_eval_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::EvalError::constructor( &self .intrinsics() .constructors() .eval_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Constructs a `URIError` with the specified message. pub fn construct_uri_error<M>(&mut self, message: M) -> JsValue where M: Into<Box<str>>, { crate::builtins::error::UriError::constructor( &self .intrinsics() .constructors() .uri_error() .constructor() .into(), &[message.into().into()], self, ) .expect("Into<String> used as message") } /// Throws a `EvalError` with the specified message. pub fn throw_eval_error<M, R>(&mut self, message: M) -> JsResult<R> where M: Into<Box<str>>, { Err(self.construct_eval_error(message)) } /// Throws a `URIError` with the specified message. pub fn throw_uri_error<M>(&mut self, message: M) -> JsResult<JsValue> where M: Into<Box<str>>, { Err(self.construct_uri_error(message)) } /// Register a global native function. /// /// This is more efficient that creating a closure function, since this does not allocate, /// it is just a function pointer. /// /// The function will be both `constructable` (call with `new`). /// /// The function will be bound to the global object with `writable`, `non-enumerable` /// and `configurable` attributes. The same as when you create a function in JavaScript. 
/// /// # Note /// /// If you want to make a function only `constructable`, or wish to bind it differently /// to the global object, you can create the function object with /// [`FunctionBuilder`](crate::object::FunctionBuilder::native). And bind it to the global /// object with [`Context::register_global_property`](Context::register_global_property) /// method. #[inline] pub fn register_global_function( &mut self, name: &str, length: usize, body: NativeFunctionSignature, ) { let function = FunctionBuilder::native(self, body) .name(name) .length(length) .constructor(true) .build(); self.global_bindings_mut().insert( name.into(), PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true) .build(), ); } /// Register a global native function that is not a constructor. /// /// This is more efficient that creating a closure function, since this does not allocate, /// it is just a function pointer. /// /// The function will be bound to the global object with `writable`, `non-enumerable` /// and `configurable` attributes. The same as when you create a function in JavaScript. /// /// # Note /// /// The difference to [`Context::register_global_function`](Context::register_global_function) is, /// that the function will not be `constructable`. /// Usage of the function as a constructor will produce a `TypeError`. #[inline] pub fn register_global_builtin_function( &mut self, name: &str, length: usize, body: NativeFunctionSignature, ) { let function = FunctionBuilder::native(self, body) .name(name) .length(length) .constructor(false) .build(); self.global_bindings_mut().insert( name.into(), PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true) .build(), ); } /// Register a global closure function. /// /// The function will be both `constructable` (call with `new`). /// /// The function will be bound to the global object with `writable`, `non-enumerable` /// and `configurable` attributes. 
The same as when you create a function in JavaScript. /// /// # Note #1 /// /// If you want to make a function only `constructable`, or wish to bind it differently /// to the global object, you can create the function object with /// [`FunctionBuilder`](crate::object::FunctionBuilder::closure). And bind it to the global /// object with [`Context::register_global_property`](Context::register_global_property) /// method. /// /// # Note #2 /// /// This function will only accept `Copy` closures, meaning you cannot /// move `Clone` types, just `Copy` types. If you need to move `Clone` types /// as captures, see [`FunctionBuilder::closure_with_captures`]. /// /// See <https://github.com/boa-dev/boa/issues/1515> for an explanation on /// why we need to restrict the set of accepted closures. #[inline] pub fn register_global_closure<F>(&mut self, name: &str, length: usize, body: F) -> JsResult<()> where F: Fn(&JsValue, &[JsValue], &mut Self) -> JsResult<JsValue> + Copy + 'static, { let function = FunctionBuilder::closure(self, body) .name(name) .length(length) .constructor(true) .build(); self.global_bindings_mut().insert( name.into(), PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true) .build(), ); Ok(()) } /// <https://tc39.es/ecma262/#sec-hasproperty> #[inline] pub(crate) fn has_property(&mut self, obj: &JsValue, key: &PropertyKey) -> JsResult<bool> { if let Some(obj) = obj.as_object() { obj.__has_property__(key, self) } else { Ok(false) } } /// Register a global class of type `T`, where `T` implements `Class`. /// /// # Example /// ```ignore /// #[derive(Debug, Trace, Finalize)] /// struct MyClass; /// /// impl Class for MyClass { /// // ... 
/// } /// /// context.register_global_class::<MyClass>(); /// ``` #[inline] pub fn register_global_class<T>(&mut self) -> JsResult<()> where T: Class, { let mut class_builder = ClassBuilder::new::<T>(self); T::init(&mut class_builder)?; let class = class_builder.build(); let property = PropertyDescriptor::builder() .value(class) .writable(T::ATTRIBUTES.writable()) .enumerable(T::ATTRIBUTES.enumerable()) .configurable(T::ATTRIBUTES.configurable()) .build(); self.global_bindings_mut().insert(T::NAME.into(), property); Ok(()) } /// Register a global property. /// /// # Example /// ``` /// use boa_engine::{ /// Context, /// property::{Attribute, PropertyDescriptor}, /// object::ObjectInitializer /// }; /// /// let mut context = Context::default(); /// /// context.register_global_property( /// "myPrimitiveProperty", /// 10, /// Attribute::all() /// ); /// /// let object = ObjectInitializer::new(&mut context) /// .property( /// "x", /// 0, /// Attribute::all() /// ) /// .property( /// "y", /// 1, /// Attribute::all() /// ) /// .build(); /// context.register_global_property( /// "myObjectProperty", /// object, /// Attribute::all() /// ); /// ``` #[inline] pub fn register_global_property<K, V>(&mut self, key: K, value: V, attribute: Attribute) where K: Into<PropertyKey>, V: Into<JsValue>, { self.realm.global_property_map.insert( &key.into(), PropertyDescriptor::builder() .value(value) .writable(attribute.writable()) .enumerable(attribute.enumerable()) .configurable(attribute.configurable()) .build(), ); } /// Evaluates the given code by compiling down to bytecode, then interpreting the bytecode into a value /// /// # Examples /// ``` ///# use boa_engine::Context; /// let mut context = Context::default(); /// /// let value = context.eval("1 + 3").unwrap(); /// /// assert!(value.is_number()); /// assert_eq!(value.as_number().unwrap(), 4.0); /// ``` #[allow(clippy::unit_arg, clippy::drop_copy)] pub fn eval<S>(&mut self, src: S) -> JsResult<JsValue> where S: AsRef<[u8]>, { let 
main_timer = Profiler::global().start_event("Evaluation", "Main"); let parsing_result = Parser::new(src.as_ref(), false) .parse_all(&mut self.interner) .map_err(|e| e.to_string()); let statement_list = match parsing_result { Ok(statement_list) => statement_list, Err(e) => return self.throw_syntax_error(e), }; let code_block = self.compile(&statement_list)?; let result = self.execute(code_block); // The main_timer needs to be dropped before the Profiler is. drop(main_timer); Profiler::global().drop(); result } /// Compile the AST into a `CodeBlock` ready to be executed by the VM. #[inline] pub fn compile(&mut self, statement_list: &StatementList) -> JsResult<Gc<CodeBlock>> { let _timer = Profiler::global().start_event("Compilation", "Main"); let mut compiler = ByteCompiler::new(Sym::MAIN, statement_list.strict(), self); compiler.create_declarations(statement_list.items())?; compiler.compile_statement_list(statement_list.items(), true)?; Ok(Gc::new(compiler.finish())) } /// Call the VM with a `CodeBlock` and return the result. /// /// Since this function receives a `Gc<CodeBlock>`, cloning the code is very cheap, since it's /// just a pointer copy. Therefore, if you'd like to execute the same `CodeBlock` multiple /// times, there is no need to re-compile it, and you can just call `clone()` on the /// `Gc<CodeBlock>` returned by the [`Self::compile()`] function. 
#[inline] pub fn execute(&mut self, code_block: Gc<CodeBlock>) -> JsResult<JsValue> { let _timer = Profiler::global().start_event("Execution", "Main"); let global_object = self.global_object().clone().into(); self.vm.push_frame(CallFrame { prev: None, code: code_block, this: global_object, pc: 0, catch: Vec::new(), finally_return: FinallyReturn::None, finally_jump: Vec::new(), pop_on_return: 0, loop_env_stack: vec![0], try_env_stack: vec![crate::vm::TryStackEntry { num_env: 0, num_loop_stack_entries: 0, }], param_count: 0, arg_count: 0, generator_resume_kind: GeneratorResumeKind::Normal, }); self.realm.set_global_binding_number(); let result = self.run(); self.vm.pop_frame(); let (result, _) = result?; Ok(result) } /// Return the intrinsic constructors and objects. #[inline] pub fn intrinsics(&self) -> &Intrinsics { &self.intrinsics } /// Set the value of trace on the context pub fn set_trace(&mut self, trace: bool) { self.vm.trace = trace; } }
30.240715
107
0.559199
710c7ae407c0d56f6b248de300a6ff7281283cc5
1,087
#![feature(box_syntax)] #![feature(crate_visibility_modifier)] #![feature(try_blocks)] #![feature(type_ascription)] use std::path::Path; use anyhow::*; pub use self::{ config::*, engine::*, listener::*, provider::*, }; mod config; mod engine; mod engines; mod listener; mod provider; pub struct Sandbox { engine: Box<dyn SandboxEngine>, } impl Sandbox { pub fn new(engine: Box<dyn SandboxEngine>) -> Self { Self { engine } } pub async fn init(&mut self, listener: SandboxListener) -> Result<()> { self.engine.init(listener).await } pub async fn destroy(&mut self) -> Result<()> { self.engine.destroy().await } pub async fn exec(&mut self, cmd: &str) -> Result<()> { self.engine.exec(cmd).await } pub async fn fs_read(&mut self, path: impl AsRef<Path>) -> Result<String> { self.engine.fs_read(path.as_ref()).await } pub async fn fs_write(&mut self, path: impl AsRef<Path>, content: String) -> Result<()> { self.engine.fs_write(path.as_ref(), content).await } }
20.903846
93
0.614535
f4b9269e8cd48fef7e483cba8148472d0d1839d8
11,515
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use libc; use uint; use option::{Some, None}; use cell::Cell; use clone::Clone; use container::Container; use iterator::IteratorUtil; use vec::{OwnedVector, MutableVector}; use super::io::net::ip::{IpAddr, Ipv4, Ipv6}; use rt::sched::Scheduler; use rt::local::Local; use unstable::run_in_bare_thread; use rt::thread::Thread; use rt::task::Task; use rt::uv::uvio::UvEventLoop; use rt::work_queue::WorkQueue; use rt::sleeper_list::SleeperList; use rt::task::{Sched}; use rt::comm::oneshot; use result::{Result, Ok, Err}; pub fn new_test_uv_sched() -> Scheduler { let mut sched = Scheduler::new(~UvEventLoop::new(), WorkQueue::new(), SleeperList::new()); // Don't wait for the Shutdown message sched.no_sleep = true; return sched; } /// Creates a new scheduler in a new thread and runs a task in it, /// then waits for the scheduler to exit. Failure of the task /// will abort the process. pub fn run_in_newsched_task(f: ~fn()) { let f = Cell::new(f); do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); let on_exit: ~fn(bool) = |exit_status| rtassert!(exit_status); let mut task = ~Task::new_root(&mut sched.stack_pool, f.take()); rtdebug!("newsched_task: %x", to_uint(task)); task.on_exit = Some(on_exit); sched.enqueue_task(task); sched.run(); } } /// Create more than one scheduler and run a function in a task /// in one of the schedulers. The schedulers will stay alive /// until the function `f` returns. 
pub fn run_in_mt_newsched_task(f: ~fn()) { use os; use from_str::FromStr; use rt::sched::Shutdown; use rt::util; let f_cell = Cell::new(f); do run_in_bare_thread { let nthreads = match os::getenv("RUST_TEST_THREADS") { Some(nstr) => FromStr::from_str(nstr).get(), None => { // Using more threads than cores in test code // to force the OS to preempt them frequently. // Assuming that this help stress test concurrent types. util::num_cpus() * 2 } }; let sleepers = SleeperList::new(); let work_queue = WorkQueue::new(); let mut handles = ~[]; let mut scheds = ~[]; for uint::range(0, nthreads) |_| { let loop_ = ~UvEventLoop::new(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); handles.push(handle); scheds.push(sched); } let f_cell = Cell::new(f_cell.take()); let handles = Cell::new(handles); let on_exit: ~fn(bool) = |exit_status| { let mut handles = handles.take(); // Tell schedulers to exit for handles.mut_iter().advance |handle| { handle.send(Shutdown); } rtassert!(exit_status); }; let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, f_cell.take()); main_task.on_exit = Some(on_exit); scheds[0].enqueue_task(main_task); let mut threads = ~[]; while !scheds.is_empty() { let sched = scheds.pop(); let sched_cell = Cell::new(sched); let thread = do Thread::start { let sched = sched_cell.take(); sched.run(); }; threads.push(thread); } // Wait for schedulers let _threads = threads; } } /// Test tasks will abort on failure instead of unwinding pub fn spawntask(f: ~fn()) { use super::sched::*; let f = Cell::new(f); let task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); rtdebug!("spawntask taking the scheduler from TLS"); do Local::borrow::<Task, ~Task>() |running_task| { ~running_task.new_child(&mut (*sched).stack_pool, f.take()) } }; rtdebug!("new task pointer: %x", to_uint(task)); let sched = Local::take::<Scheduler>(); rtdebug!("spawntask scheduling the new task"); 
sched.schedule_task(task); } /// Create a new task and run it right now. Aborts on failure pub fn spawntask_immediately(f: ~fn()) { use super::sched::*; let f = Cell::new(f); let task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); do Local::borrow::<Task, ~Task>() |running_task| { ~running_task.new_child(&mut (*sched).stack_pool, f.take()) } }; let sched = Local::take::<Scheduler>(); do sched.switch_running_tasks_and_then(task) |sched, task| { sched.enqueue_task(task); } } /// Create a new task and run it right now. Aborts on failure pub fn spawntask_later(f: ~fn()) { use super::sched::*; let f = Cell::new(f); let task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); do Local::borrow::<Task, ~Task>() |running_task| { ~running_task.new_child(&mut (*sched).stack_pool, f.take()) } }; let mut sched = Local::take::<Scheduler>(); sched.enqueue_task(task); Local::put(sched); } /// Spawn a task and either run it immediately or run it later pub fn spawntask_random(f: ~fn()) { use super::sched::*; use rand::{Rand, rng}; let f = Cell::new(f); let task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); do Local::borrow::<Task, ~Task>() |running_task| { ~running_task.new_child(&mut (*sched).stack_pool, f.take()) } }; let mut sched = Local::take::<Scheduler>(); let mut rng = rng(); let run_now: bool = Rand::rand(&mut rng); if run_now { do sched.switch_running_tasks_and_then(task) |sched, task| { sched.enqueue_task(task); } } else { sched.enqueue_task(task); Local::put(sched); } } /// Spawn a task, with the current scheduler as home, and queue it to /// run later. 
pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) { use super::sched::*; use rand::{rng, RngUtil}; let mut rng = rng(); let task = { let sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; let handle = sched.make_handle(); let home_id = handle.sched_id; // now that we know where this is going, build a new function // that can assert it is in the right place let af: ~fn() = || { do Local::borrow::<Scheduler,()>() |sched| { rtdebug!("home_id: %u, runtime loc: %u", home_id, sched.sched_id()); assert!(home_id == sched.sched_id()); }; f() }; ~Task::new_root_homed(&mut sched.stack_pool, Sched(handle), af) }; let dest_sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; // enqueue it for future execution dest_sched.enqueue_task(task); } /// Spawn a task and wait for it to finish, returning whether it /// completed successfully or failed pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { use cell::Cell; use super::sched::*; let f = Cell::new(f); let (port, chan) = oneshot(); let chan = Cell::new(chan); let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status); let mut new_task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); do Local::borrow::<Task, ~Task> |_running_task| { // I don't understand why using a child task here fails. I // think the fail status is propogating back up the task // tree and triggering a fail for the parent, which we // aren't correctly expecting. // ~running_task.new_child(&mut (*sched).stack_pool, ~Task::new_root(&mut (*sched).stack_pool, f.take()) } }; new_task.on_exit = Some(on_exit); let sched = Local::take::<Scheduler>(); do sched.switch_running_tasks_and_then(new_task) |sched, old_task| { sched.enqueue_task(old_task); } rtdebug!("enqueued the new task, now waiting on exit_status"); let exit_status = port.recv(); if exit_status { Ok(()) } else { Err(()) } } // Spawn a new task in a new scheduler and return a thread handle. 
pub fn spawntask_thread(f: ~fn()) -> Thread { use rt::sched::*; let f = Cell::new(f); let task = unsafe { let sched = Local::unsafe_borrow::<Scheduler>(); do Local::borrow::<Task, ~Task>() |running_task| { ~running_task.new_child(&mut (*sched).stack_pool, f.take()) } }; let task = Cell::new(task); let thread = do Thread::start { let mut sched = ~new_test_uv_sched(); sched.enqueue_task(task.take()); sched.run(); }; return thread; } /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { unsafe { return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16; } extern { fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t; } } /// Get a unique IPv4 localhost:port pair starting at 9600 pub fn next_test_ip4() -> IpAddr { Ipv4(127, 0, 0, 1, next_test_port()) } /// Get a unique IPv6 localhost:port pair starting at 9600 pub fn next_test_ip6() -> IpAddr { Ipv6(0, 0, 0, 0, 0, 0, 0, 1, next_test_port()) } /* XXX: Welcome to MegaHack City. The bots run multiple builds at the same time, and these builds all want to use ports. This function figures out which workspace it is running in and assigns a port range based on it. */ fn base_port() -> uint { use os; use str::StrSlice; use to_str::ToStr; use vec::ImmutableVector; let base = 9600u; let range = 1000; let bases = [ ("32-opt", base + range * 1), ("32-noopt", base + range * 2), ("64-opt", base + range * 3), ("64-noopt", base + range * 4), ("64-opt-vg", base + range * 5), ("all-opt", base + range * 6), ("snap3", base + range * 7), ("dist", base + range * 8) ]; let path = os::getcwd().to_str(); let mut final_base = base; for bases.iter().advance |&(dir, base)| { if path.contains(dir) { final_base = base; break; } } return final_base; } /// Get a constant that represents the number of times to repeat /// stress tests. Default 1. pub fn stress_factor() -> uint { use os::getenv; match getenv("RUST_RT_STRESS") { Some(val) => uint::from_str(val).get(), None => 1 } }
29.375
75
0.566392
1ed65d4e613097772180a61b6aa910c1fe3cbba4
192
pub const CPU_RELEASE_ADDR: usize = usize::MAX; pub const COUNTER_FREQUENCY: usize = 24000000; pub const PERIPHERALS_START: usize = 0x08000000; pub const PERIPHERALS_END: usize = 0x40000000;
32
48
0.796875
f487eb3b49c982b907e30d52f31d96357d104938
970
use crate::core::sstorage::ImmutableString; use crate::renderer::framework::{ error::FrameworkError, gpu_program::{GpuProgram, UniformLocation}, state::PipelineState, }; pub struct SkyboxShader { pub program: GpuProgram, pub wvp_matrix: UniformLocation, pub cubemap_texture: UniformLocation, } impl SkyboxShader { pub fn new(state: &mut PipelineState) -> Result<Self, FrameworkError> { let fragment_source = include_str!("shaders/skybox_fs.glsl"); let vertex_source = include_str!("shaders/skybox_vs.glsl"); let program = GpuProgram::from_source(state, "SkyboxShader", vertex_source, fragment_source)?; Ok(Self { wvp_matrix: program .uniform_location(state, &ImmutableString::new("worldViewProjection"))?, cubemap_texture: program .uniform_location(state, &ImmutableString::new("cubemapTexture"))?, program, }) } }
32.333333
92
0.658763
dd1600c28360ea816694a9994704ebe734f6e7c9
5,707
#[doc = "Register `PUBLISH_PORT` reader"] pub struct R(crate::R<PUBLISH_PORT_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PUBLISH_PORT_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PUBLISH_PORT_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PUBLISH_PORT_SPEC>) -> Self { R(reader) } } #[doc = "Register `PUBLISH_PORT` writer"] pub struct W(crate::W<PUBLISH_PORT_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PUBLISH_PORT_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PUBLISH_PORT_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PUBLISH_PORT_SPEC>) -> Self { W(writer) } } #[doc = "Field `CHIDX` reader - Channel that event PORT will publish to."] pub struct CHIDX_R(crate::FieldReader<u8, u8>); impl CHIDX_R { pub(crate) fn new(bits: u8) -> Self { CHIDX_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for CHIDX_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CHIDX` writer - Channel that event PORT will publish to."] pub struct CHIDX_W<'a> { w: &'a mut W, } impl<'a> CHIDX_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x0f) | (value as u32 & 0x0f); self.w } } #[doc = "\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EN_A { #[doc = "0: Disable publishing"] DISABLED = 0, #[doc = "1: Enable publishing"] ENABLED = 1, } impl From<EN_A> for bool { #[inline(always)] fn from(variant: EN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `EN` reader - "] pub struct EN_R(crate::FieldReader<bool, EN_A>); impl EN_R { pub(crate) fn new(bits: bool) -> Self { EN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values 
variant"] #[inline(always)] pub fn variant(&self) -> EN_A { match self.bits { false => EN_A::DISABLED, true => EN_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == EN_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == EN_A::ENABLED } } impl core::ops::Deref for EN_R { type Target = crate::FieldReader<bool, EN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `EN` writer - "] pub struct EN_W<'a> { w: &'a mut W, } impl<'a> EN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable publishing"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(EN_A::DISABLED) } #[doc = "Enable publishing"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(EN_A::ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31); self.w } } impl R { #[doc = "Bits 0:3 - Channel that event PORT will publish to."] #[inline(always)] pub fn chidx(&self) -> CHIDX_R { CHIDX_R::new((self.bits & 0x0f) as u8) } #[doc = "Bit 31"] #[inline(always)] pub fn en(&self) -> EN_R { EN_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bits 0:3 - Channel that event PORT will publish to."] #[inline(always)] pub fn chidx(&mut self) -> CHIDX_W { CHIDX_W { w: self } } #[doc = "Bit 31"] #[inline(always)] pub fn en(&mut self) -> EN_W { EN_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn 
bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Publish configuration for event PORT\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [publish_port](index.html) module"] pub struct PUBLISH_PORT_SPEC; impl crate::RegisterSpec for PUBLISH_PORT_SPEC { type Ux = u32; } #[doc = "`read()` method returns [publish_port::R](R) reader structure"] impl crate::Readable for PUBLISH_PORT_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [publish_port::W](W) writer structure"] impl crate::Writable for PUBLISH_PORT_SPEC { type Writer = W; } #[doc = "`reset()` method sets PUBLISH_PORT to value 0"] impl crate::Resettable for PUBLISH_PORT_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
29.117347
429
0.578062
09dcfa1b08752cb83663bf5caebdef2bd4038441
3,277
#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[repr(C)] pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>); impl<T> __BindgenUnionField<T> { #[inline] pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) } #[inline] pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) } #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) } } impl<T> ::std::default::Default for __BindgenUnionField<T> { #[inline] fn default() -> Self { Self::new() } } impl<T> ::std::clone::Clone for __BindgenUnionField<T> { #[inline] fn clone(&self) -> Self { Self::new() } } impl<T> ::std::marker::Copy for __BindgenUnionField<T> {} impl<T> ::std::fmt::Debug for __BindgenUnionField<T> { fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { fmt.write_str("__BindgenUnionField") } } impl<T> ::std::hash::Hash for __BindgenUnionField<T> { fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {} } impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> { fn eq(&self, _other: &__BindgenUnionField<T>) -> bool { true } } impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {} #[repr(C)] #[derive(Debug, Default, Copy, Hash, PartialEq, Eq)] pub struct foo { pub bar: __BindgenUnionField<foo__bindgen_ty_1>, pub bindgen_union_field: [u32; 2usize], } #[repr(C)] #[derive(Debug, Default, Copy, Hash, PartialEq, Eq)] pub struct foo__bindgen_ty_1 { pub a: ::std::os::raw::c_uint, pub b: ::std::os::raw::c_uint, } #[test] fn bindgen_test_layout_foo__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::<foo__bindgen_ty_1>(), 8usize, concat!("Size of: ", stringify!(foo__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::<foo__bindgen_ty_1>(), 4usize, concat!("Alignment of ", stringify!(foo__bindgen_ty_1)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<foo__bindgen_ty_1>())).a as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(foo__bindgen_ty_1), "::", stringify!(a) ) ); 
assert_eq!( unsafe { &(*(::std::ptr::null::<foo__bindgen_ty_1>())).b as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(foo__bindgen_ty_1), "::", stringify!(b) ) ); } impl Clone for foo__bindgen_ty_1 { fn clone(&self) -> Self { *self } } #[test] fn bindgen_test_layout_foo() { assert_eq!( ::std::mem::size_of::<foo>(), 8usize, concat!("Size of: ", stringify!(foo)) ); assert_eq!( ::std::mem::align_of::<foo>(), 4usize, concat!("Alignment of ", stringify!(foo)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<foo>())).bar as *const _ as usize }, 0usize, concat!("Offset of field: ", stringify!(foo), "::", stringify!(bar)) ); } impl Clone for foo { fn clone(&self) -> Self { *self } }
25.601563
80
0.549588
bf22b6f06139e41c539321e3781909a69cc052be
2,469
use voile_util::level::Level; use voile_util::loc::{Loc, ToLoc}; use voile_util::uid::GI; use crate::syntax::core::Pat; use super::{Tele, Term}; #[derive(Debug, PartialEq, Eq, Clone)] pub struct CodataInfo { pub loc: Loc, pub self_ref: String, pub name: String, pub params: Tele, /// References to its projections (fields). pub fields: Vec<GI>, pub level: Level, } #[derive(Debug, PartialEq, Eq, Clone)] pub struct ConsInfo { pub loc: Loc, pub name: String, pub params: Tele, pub data: GI, /// If this is a record constructor, /// we fill the fields' names here. pub fields: Option<Vec<String>>, } #[derive(Debug, PartialEq, Eq, Clone)] pub struct DataInfo { pub loc: Loc, pub name: String, pub params: Tele, /// References to its constructors. pub conses: Vec<GI>, pub level: Level, } /// Declaration. /// [Agda](https://hackage.haskell.org/package/Agda-2.6.0.1/docs/src/Agda.TypeChecking.Monad.Base.html#Function). #[derive(Debug, PartialEq, Eq, Clone)] pub enum Decl { /// Datatypes. Data(DataInfo), /// Coinductive records. Codata(CodataInfo), Cons(ConsInfo), Proj { loc: Loc, name: String, codata: GI, ty: Term, }, /// Function definitions. Func { loc: Loc, name: String, signature: Term, clauses: Vec<Clause>, }, } impl Decl { pub fn def_name(&self) -> &String { match self { Decl::Proj { name, .. } | Decl::Func { name, .. } => name, Decl::Data(i) => &i.name, Decl::Cons(i) => &i.name, Decl::Codata(i) => &i.name, } } } impl ToLoc for Decl { fn loc(&self) -> Loc { match self { Decl::Proj { loc, .. } | Decl::Func { loc, .. } => *loc, Decl::Data(i) => i.loc(), Decl::Cons(i) => i.loc(), Decl::Codata(i) => i.loc(), } } } /// Function clauses. /// [Agda](https://hackage.haskell.org/package/Agda-2.6.0.1/docs/src/Agda.Syntax.Internal.html#Clause). #[derive(Debug, PartialEq, Eq, Clone)] pub struct Clause { /// $\Delta$. The types of the pattern variables in dependency order. pat_tele: Tele, /// $\Delta \vdash ps$. The de Bruijn indices refer to $\Delta$. 
patterns: Vec<Pat>, /// `Some(v)` if $\Delta \vdash v$, while `None` if the patterns are absurd. body: Option<Term>, // TODO: case-trees. }
24.939394
113
0.567031
755a5e916c8087fc1af5b34f6c031808893446e8
4,962
/* * Rust-SFML - Copyright (c) 2013 Letang Jeremy. * * The original software, SFML library, is provided by Laurent Gomila. * * This software is provided 'as-is', without any express or implied warranty. * In no event will the authors be held liable for any damages arising from * the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not claim * that you wrote the original software. If you use this software in a product, * an acknowledgment in the product documentation would be appreciated but is * not required. * * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * * 3. This notice may not be removed or altered from any source distribution. */ //! Utility Class providing 2 dimensional vectors for i32, u32, and f32. use std::ops::{Add, Sub, Mul, Div}; /// Implementation of Vector2i #[repr(C)] #[derive(Clone, PartialOrd, Ord, PartialEq, Eq, Debug, Copy)] pub struct Vector2<T> { /// X coordinate of the vector. pub x: T, /// Y coordinate of the vector. 
pub y: T } /// export Vector2<i32> as Vector2i pub type Vector2i = Vector2<i32>; /// export Vector2<u32> as Vector2u pub type Vector2u = Vector2<u32>; /// export Vector2<f32> as Vector2f pub type Vector2f = Vector2<f32>; impl<T> Vector2<T> { /// Build a new Vector2<T> pub fn new(x: T, y: T) -> Vector2<T> { Vector2 { x: x, y: y } } } impl<T: Add + Copy> Add<T> for Vector2<T> { type Output = Vector2<T::Output>; fn add(self, rhs: T) -> Vector2<T::Output> { Vector2 { x: self.x + rhs, y: self.y + rhs } } } impl<T: Sub + Copy> Sub<T> for Vector2<T> { type Output = Vector2<T::Output>; fn sub(self, rhs: T) -> Vector2<T::Output> { Vector2 { x: self.x - rhs, y: self.y - rhs } } } impl<T: Mul + Copy> Mul<T> for Vector2<T> { type Output = Vector2<T::Output>; fn mul(self, rhs: T) -> Vector2<T::Output> { Vector2 { x: self.x * rhs, y: self.y * rhs } } } impl<T: Div + Copy> Div<T> for Vector2<T> { type Output = Vector2<T::Output>; fn div(self, rhs: T) -> Vector2<T::Output> { Vector2 { x: self.x / rhs, y: self.y / rhs } } } impl<T: Add> Add for Vector2<T> { type Output = Vector2<T::Output>; fn add(self, rhs: Vector2<T>) -> Vector2<T::Output> { Vector2 { x: self.x + rhs.x, y: self.y + rhs.y } } } impl<T: Sub> Sub for Vector2<T> { type Output = Vector2<T::Output>; fn sub(self, rhs: Vector2<T>) -> Vector2<T::Output> { Vector2 { x: self.x - rhs.x, y: self.y - rhs.y } } } impl<T: Mul> Mul for Vector2<T> { type Output = Vector2<T::Output>; fn mul(self, rhs: Vector2<T>) -> Vector2<T::Output> { Vector2 { x: self.x * rhs.x, y: self.y * rhs.y } } } impl<T: Div> Div for Vector2<T> { type Output = Vector2<T::Output>; fn div(self, rhs: Vector2<T>) -> Vector2<T::Output> { Vector2 { x: self.x / rhs.x, y: self.y / rhs.y } } } /// Utility trait to convert a Vector2 on another type pub trait ToVec { /// Convert the current Vector2 to a Vector2f fn to_vector2f(&self) -> Vector2f; /// Convert the current Vector2 to a Vector2i fn to_vector2i(&self) -> Vector2i; /// Convert the current Vector2f to a 
Vector2u fn to_vector2u(&self) -> Vector2u; } impl ToVec for Vector2f { fn to_vector2f(&self) -> Vector2f { self.clone() } fn to_vector2i(&self) -> Vector2i { Vector2i { x: self.x as i32, y: self.y as i32 } } fn to_vector2u(&self) -> Vector2u { Vector2u { x: self.x as u32, y: self.y as u32 } } } impl ToVec for Vector2i { fn to_vector2f(&self) -> Vector2f { Vector2f { x: self.x as f32, y: self.y as f32 } } fn to_vector2i(&self) -> Vector2i { self.clone() } fn to_vector2u(&self) -> Vector2u { Vector2u { x: self.x as u32, y: self.y as u32 } } } impl ToVec for Vector2u { fn to_vector2f(&self) -> Vector2f { Vector2f { x: self.x as f32, y: self.y as f32 } } fn to_vector2i(&self) -> Vector2i { Vector2i { x: self.x as i32, y: self.y as i32 } } fn to_vector2u(&self) -> Vector2u { self.clone() } }
23.07907
81
0.552801
e9764547628c052cd22db69dbf5424e619881db4
1,868
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// Creates a `Vec` containing the arguments. /// /// `vec!` allows `Vec`s to be defined with the same syntax as array expressions. /// There are two forms of this macro: /// /// - Create a `Vec` containing a given list of elements: /// /// ``` /// let v = vec![1, 2, 3]; /// assert_eq!(v[0], 1); /// assert_eq!(v[1], 2); /// assert_eq!(v[2], 3); /// ``` /// /// - Create a `Vec` from a given element and size: /// /// ``` /// let v = vec![1; 3]; /// assert_eq!(v, [1, 1, 1]); /// ``` /// /// Note that unlike array expressions this syntax supports all elements /// which implement `Clone` and the number of elements doesn't have to be /// a constant. #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] macro_rules! vec { ($elem:expr; $n:expr) => ( $crate::vec::from_elem($elem, $n) ); ($($x:expr),*) => ( <[_] as $crate::slice::SliceExt>::into_vec( $crate::boxed::Box::new([$($x),*])) ); ($($x:expr,)*) => (vec![$($x),*]) } /// Use the syntax described in `std::fmt` to create a value of type `String`. /// See `std::fmt` for more information. /// /// # Example /// /// ``` /// format!("test"); /// format!("hello {}", "world!"); /// format!("x = {}, y = {y}", 10, y = 30); /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] macro_rules! format { ($($arg:tt)*) => ($crate::fmt::format(format_args!($($arg)*))) }
29.650794
81
0.588865
cc21e5af0317fb4ca0edbe6fe7628a34bc98519f
3,529
// Copyright 2022 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 use std::{collections::VecDeque, ops::RangeInclusive}; use async_trait::async_trait; use chronicle::{ db::{model::tangle::MilestoneIndex, MongoDb}, runtime::{Actor, ActorContext, ActorError, ConfigureActor, HandleEvent, Report}, }; use inx::{client::InxClient, tonic::Channel}; use super::{InxError, MilestoneStream}; // The Syncer starts at a certain milestone index in the past and moves forwards in time trying to sync as many // milestones as possible - including their cones. pub struct Syncer { gaps: Gaps, db: MongoDb, inx_client: InxClient<Channel>, } impl Syncer { pub fn new(gaps: Vec<RangeInclusive<MilestoneIndex>>, db: MongoDb, inx_client: InxClient<Channel>) -> Self { Self { gaps: Gaps(gaps.into()), db, inx_client, } } } #[derive(Debug, Default)] pub struct Gaps(VecDeque<RangeInclusive<MilestoneIndex>>); impl Iterator for Gaps { type Item = RangeInclusive<MilestoneIndex>; fn next(&mut self) -> Option<Self::Item> { while let Some(range) = self.0.pop_front() { if range.start() <= range.end() { return Some(range); } } None } } #[async_trait] impl Actor for Syncer { type State = (); type Error = InxError; async fn init(&mut self, _cx: &mut ActorContext<Self>) -> Result<Self::State, Self::Error> { Ok(()) } fn name(&self) -> std::borrow::Cow<'static, str> { "Syncer".into() } } #[async_trait] impl HandleEvent<Report<MilestoneStream>> for Syncer { async fn handle_event( &mut self, cx: &mut ActorContext<Self>, event: Report<MilestoneStream>, _state: &mut Self::State, ) -> Result<(), Self::Error> { // Start syncing the next milestone range cx.delay(SyncNext, None)?; match event { Report::Success(_) => (), Report::Error(report) => match report.error { ActorError::Result(e) => { Err(e)?; } ActorError::Aborted | ActorError::Panic => { cx.abort().await; } }, } Ok(()) } } pub struct SyncNext; #[async_trait] impl HandleEvent<SyncNext> for Syncer { async fn handle_event( &mut self, cx: &mut 
ActorContext<Self>, _evt: SyncNext, _state: &mut Self::State, ) -> Result<(), Self::Error> { if let Some(milestone_range) = self.gaps.next() { log::info!( "Requesting unsynced milestone range {}..{}.", milestone_range.start(), milestone_range.end() ); let milestone_stream = self .inx_client .listen_to_confirmed_milestones(inx::proto::MilestoneRangeRequest::from( *milestone_range.start()..=*milestone_range.end(), )) .await? .into_inner(); cx.spawn_child( MilestoneStream::new( self.db.clone(), self.inx_client.clone(), *milestone_range.start()..=*milestone_range.end(), ) .with_stream(milestone_stream), ) .await; } else { log::info!("Sync complete"); cx.shutdown(); } Ok(()) } }
27.787402
112
0.53953
1a1f985ae486fb4cf021dff0b3be82967e8edf60
2,747
use std::collections::HashMap; use std::hash::Hash; use crate::frame::IntoBytes; use crate::types::value::Value; use crate::types::CString; /// Enum that represents two types of query values: /// * values without name /// * values with names #[derive(Debug, Clone)] pub enum QueryValues { SimpleValues(Vec<Value>), NamedValues(HashMap<String, Value>), } impl QueryValues { /// It returns `true` if query values is with names and `false` otherwise. pub fn with_names(&self) -> bool { match *self { QueryValues::SimpleValues(_) => false, _ => true, } } /// It return number of values. pub fn len(&self) -> usize { match *self { QueryValues::SimpleValues(ref v) => v.len(), QueryValues::NamedValues(ref m) => m.len(), } } fn named_value_into_bytes_fold(mut bytes: Vec<u8>, vals: (&String, &Value)) -> Vec<u8> { let mut name_bytes = CString::new(vals.0.clone()).into_cbytes(); let mut vals_bytes = vals.1.into_cbytes(); bytes.append(&mut name_bytes); bytes.append(&mut vals_bytes); bytes } fn value_into_bytes_fold(mut bytes: Vec<u8>, val: &Value) -> Vec<u8> { let mut val_bytes = val.into_cbytes(); bytes.append(&mut val_bytes); bytes } } impl<T: Into<Value> + Clone> From<Vec<T>> for QueryValues { /// It converts values from `Vec` to query values without names `QueryValues::SimpleValues`. fn from(values: Vec<T>) -> QueryValues { let vals = values.iter().map(|v| v.clone().into()); QueryValues::SimpleValues(vals.collect()) } } impl<'a, T: Into<Value> + Clone> From<&'a [T]> for QueryValues { /// It converts values from `Vec` to query values without names `QueryValues::SimpleValues`. fn from(values: &'a [T]) -> QueryValues { let vals = values.iter().map(|v| v.clone().into()); QueryValues::SimpleValues(vals.collect()) } } impl<S: ToString + Hash + Eq, V: Into<Value> + Clone> From<HashMap<S, V>> for QueryValues { /// It converts values from `HashMap` to query values with names `QueryValues::NamedValues`. 
fn from(values: HashMap<S, V>) -> QueryValues { let map: HashMap<String, Value> = HashMap::with_capacity(values.len()); let _values = values.iter().fold(map, |mut acc, v| { let name = v.0; let val = v.1; acc.insert(name.to_string(), val.clone().into()); acc }); QueryValues::NamedValues(_values) } } impl IntoBytes for QueryValues { fn into_cbytes(&self) -> Vec<u8> { let bytes: Vec<u8> = vec![]; match *self { QueryValues::SimpleValues(ref v) => v.iter().fold(bytes, QueryValues::value_into_bytes_fold), QueryValues::NamedValues(ref v) => v .iter() .fold(bytes, QueryValues::named_value_into_bytes_fold), } } }
30.522222
99
0.642519
3a37f7b7d215b5fd8c45f6072d6ba53600089f1b
8,608
#[doc = r" Value read from the register"] pub struct R { bits: u16, } #[doc = r" Value to write to the register"] pub struct W { bits: u16, } impl super::TCD15_ATTR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get() } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct DSIZER { bits: u8, } impl DSIZER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct DMODR { bits: u8, } impl DMODR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = "Possible values of the field `SSIZE`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SSIZER { #[doc = "8-bit"] _0, #[doc = "16-bit"] _1, #[doc = "32-bit"] _10, #[doc = r" Reserved"] _Reserved(u8), } impl SSIZER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { SSIZER::_0 => 0, SSIZER::_1 => 1, SSIZER::_10 => 2, SSIZER::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> SSIZER { match value { 0 => SSIZER::_0, 1 => SSIZER::_1, 2 => SSIZER::_10, i => SSIZER::_Reserved(i), } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SSIZER::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == 
SSIZER::_1 } #[doc = "Checks if the value of the field is `_10`"] #[inline] pub fn is_10(&self) -> bool { *self == SSIZER::_10 } } #[doc = "Possible values of the field `SMOD`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SMODR { #[doc = "Source address modulo feature is disabled"] _0, #[doc = r" Reserved"] _Reserved(u8), } impl SMODR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { SMODR::_0 => 0, SMODR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> SMODR { match value { 0 => SMODR::_0, i => SMODR::_Reserved(i), } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SMODR::_0 } } #[doc = r" Proxy"] pub struct _DSIZEW<'a> { w: &'a mut W, } impl<'a> _DSIZEW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 7; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _DMODW<'a> { w: &'a mut W, } impl<'a> _DMODW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 31; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SSIZE`"] pub enum SSIZEW { #[doc = "8-bit"] _0, #[doc = "16-bit"] _1, #[doc = "32-bit"] _10, } impl SSIZEW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { SSIZEW::_0 => 0, SSIZEW::_1 => 1, SSIZEW::_10 => 2, } } } #[doc = r" Proxy"] pub struct _SSIZEW<'a> { w: &'a mut W, } impl<'a> _SSIZEW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SSIZEW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "8-bit"] #[inline] pub fn _0(self) -> &'a 
mut W { self.variant(SSIZEW::_0) } #[doc = "16-bit"] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SSIZEW::_1) } #[doc = "32-bit"] #[inline] pub fn _10(self) -> &'a mut W { self.variant(SSIZEW::_10) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 7; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SMOD`"] pub enum SMODW { #[doc = "Source address modulo feature is disabled"] _0, } impl SMODW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { SMODW::_0 => 0, } } } #[doc = r" Proxy"] pub struct _SMODW<'a> { w: &'a mut W, } impl<'a> _SMODW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SMODW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "Source address modulo feature is disabled"] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SMODW::_0) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 31; const OFFSET: u8 = 11; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u16 { self.bits } #[doc = "Bits 0:2 - Destination data transfer size"] #[inline] pub fn dsize(&self) -> DSIZER { let bits = { const MASK: u8 = 7; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u16) as u8 }; DSIZER { bits } } #[doc = "Bits 3:7 - Destination Address Modulo"] #[inline] pub fn dmod(&self) -> DMODR { let bits = { const MASK: u8 = 31; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u16) as u8 }; DMODR { bits } } #[doc = "Bits 8:10 - Source data transfer size"] #[inline] pub fn ssize(&self) -> SSIZER { SSIZER::_from({ const MASK: u8 = 7; const OFFSET: u8 = 8; 
((self.bits >> OFFSET) & MASK as u16) as u8 }) } #[doc = "Bits 11:15 - Source Address Modulo"] #[inline] pub fn smod(&self) -> SMODR { SMODR::_from({ const MASK: u8 = 31; const OFFSET: u8 = 11; ((self.bits >> OFFSET) & MASK as u16) as u8 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u16) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:2 - Destination data transfer size"] #[inline] pub fn dsize(&mut self) -> _DSIZEW { _DSIZEW { w: self } } #[doc = "Bits 3:7 - Destination Address Modulo"] #[inline] pub fn dmod(&mut self) -> _DMODW { _DMODW { w: self } } #[doc = "Bits 8:10 - Source data transfer size"] #[inline] pub fn ssize(&mut self) -> _SSIZEW { _SSIZEW { w: self } } #[doc = "Bits 11:15 - Source Address Modulo"] #[inline] pub fn smod(&mut self) -> _SMODW { _SMODW { w: self } } }
24.524217
59
0.490474
237997efbf270dc0034e0ca6caf66005717a04fd
1,704
use crate::Error; use serde::{Deserialize, Deserializer}; pub fn check_username(username: &str) -> Result<(), Error> { if valid_username(username) { Ok(()) } else { Err(Error::UsernameMalformed) } } fn valid_username(username: &str) -> bool { const ALLOWED_SPECIAL_CHARS: &str = ".-_ "; if username.is_empty() { return false; } username .chars() .all(|c| c.is_alphanumeric() || ALLOWED_SPECIAL_CHARS.contains(c)) } pub fn serde_parse_f64_option<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error> where D: Deserializer<'de>, { let opt: Option<String> = Option::deserialize(deserializer)?; match opt { Some(s) => parse_f64(&s).map(Some), None => Ok(None), } } fn parse_f64<E: serde::de::Error>(s: &str) -> Result<f64, E> { s.parse().map_err(serde::de::Error::custom) } #[cfg(test)] mod tests { use super::{check_username, serde_parse_f64_option}; #[test] fn test_check_username() { let valid = "Hello.Hello Hello-Hello_Hello"; let invalids = ["", "Hello/Hello", "Hello#Hello"]; assert!(check_username(valid).is_ok()); for &invalid in invalids.iter() { assert!(check_username(invalid).is_err()); } } #[test] fn test_parse_f64() { #[derive(serde::Deserialize)] struct Test { #[serde(deserialize_with = "serde_parse_f64_option")] value: Option<f64>, } let json = r#" { "value": "0.000017" } "#; let test: Test = serde_json::from_str(json).unwrap(); assert_eq!(test.value.unwrap(), 0.000017); } }
23.027027
87
0.568075
1695756512f585b78cd33948631673614d87de56
3,003
use super::{Error, Store}; use crate::forwards_iter::SimpleForwardsBlockRootsIterator; use crate::impls::beacon_state::{get_full_state, store_full_state}; use parking_lot::RwLock; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; use types::*; type DBHashMap = HashMap<Vec<u8>, Vec<u8>>; /// A thread-safe `HashMap` wrapper. pub struct MemoryStore<E: EthSpec> { db: RwLock<DBHashMap>, _phantom: PhantomData<E>, } impl<E: EthSpec> Clone for MemoryStore<E> { fn clone(&self) -> Self { Self { db: RwLock::new(self.db.read().clone()), _phantom: PhantomData, } } } impl<E: EthSpec> MemoryStore<E> { /// Create a new, empty database. pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), _phantom: PhantomData, } } fn get_key_for_col(col: &str, key: &[u8]) -> Vec<u8> { let mut col = col.as_bytes().to_vec(); col.append(&mut key.to_vec()); col } } impl<E: EthSpec> Store<E> for MemoryStore<E> { type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator; /// Get the value of some key from the database. Returns `None` if the key does not exist. fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> { let column_key = Self::get_key_for_col(col, key); Ok(self .db .read() .get(&column_key) .and_then(|val| Some(val.clone()))) } /// Puts a key in the database. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().insert(column_key, val.to_vec()); Ok(()) } /// Return true if some key exists in some column. fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> { let column_key = Self::get_key_for_col(col, key); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().remove(&column_key); Ok(()) } /// Store a state in the store. 
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> { store_full_state(self, state_root, state) } /// Fetch a state from the store. fn get_state( &self, state_root: &Hash256, _: Option<Slot>, ) -> Result<Option<BeaconState<E>>, Error> { get_full_state(self, state_root) } fn forwards_block_roots_iterator( store: Arc<Self>, start_slot: Slot, end_state: BeaconState<E>, end_block_root: Hash256, _: &ChainSpec, ) -> Self::ForwardsBlockRootsIterator { SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root) } }
28.330189
94
0.594739
48a1ef3959aad037eaa1b5962b7f60bbba08015b
11,744
use std::collections::HashMap; use std::rc::Rc; use rustc_serialize::{Decodable, Decoder}; use animation::AnimationClip; use transform::{Transform, FromTransform}; use blend_tree::{AnimBlendTree, BlendTreeNodeDef, ClipId}; use skeleton::Skeleton; const MAX_JOINTS: usize = 64; /// A state that an AnimationController can be in, consisting /// of a blend tree and a collection of transitions to other states pub struct AnimationState<T: Transform> { /// The blend tree used to determine the final blended pose for this state pub blend_tree: AnimBlendTree<T>, /// Transitions from this state to other AnimationStates pub transitions: Vec<AnimationTransition>, } /// Representation of a state transition to a target state, with a condition and a duration #[derive(Debug, Clone, RustcDecodable)] pub struct AnimationTransition { /// The name of the target state to transition to pub target_state: String, /// The condition that will be checked in order to determine /// if the controller should transition to the target state pub condition: TransitionCondition, /// The duration of the transition, during which a linear blend /// transition between the current and target states should occur pub duration: f32, } /// Representation of a condition to check for an AnimationTransition #[derive(Debug, Clone, RustcDecodable)] pub struct TransitionCondition { /// The name of the controller parameter to compare with pub parameter: String, /// The comparision operator to use pub operator: Operator, /// The constant value to compare with the controller parameter value pub value: f32, } impl TransitionCondition { /// Returns true if the condition is satisfied pub fn is_true(&self, parameters: &HashMap<String, f32>) -> bool { match self.operator { Operator::LessThan => parameters[&self.parameter[..]] < self.value, Operator::GreaterThan => parameters[&self.parameter[..]] > self.value, Operator::LessThanEqual => parameters[&self.parameter[..]] <= self.value, Operator::GreaterThanEqual => 
parameters[&self.parameter[..]] >= self.value, Operator::Equal => parameters[&self.parameter[..]] == self.value, Operator::NotEqual => parameters[&self.parameter[..]] != self.value, } } } #[derive(Debug, Clone)] pub enum Operator { LessThan, LessThanEqual, GreaterThan, GreaterThanEqual, Equal, NotEqual, } impl Decodable for Operator { fn decode<D: Decoder>(decoder: &mut D) -> Result<Operator, D::Error> { match &try!(decoder.read_str())[..] { "<" => Ok(Operator::LessThan), ">" => Ok(Operator::GreaterThan), "<=" => Ok(Operator::LessThanEqual), ">=" => Ok(Operator::GreaterThanEqual), "=" => Ok(Operator::Equal), "!=" => Ok(Operator::NotEqual), _ => Ok(Operator::Equal), // FIXME -- figure out how to throw a D::Error... } } } /// Definition struct for an AnimationController, which can be deserialized from JSON /// and converted to an AnimationController instance at runtime #[derive(Clone, Debug, RustcDecodable)] pub struct AnimationControllerDef { /// Identifying name for the controller definition pub name: String, /// Declaration list of all parameters that are used by the AnimationController, /// including state transition conditions and blend tree parameters pub parameters: Vec<String>, /// List of animation state definitions pub states: Vec<AnimationStateDef>, /// The name of the state that the AnimationController should start in pub initial_state: String, } /// Definition struct for an AnimationState, which can be deserialized from JSON /// and converted to an AnimationState instance at runtime #[derive(Clone, Debug)] pub struct AnimationStateDef { /// The identifying name for the state pub name: String, /// The blend tree definition for this state pub blend_tree: BlendTreeNodeDef, /// The transitions to other states that can occur from this state pub transitions: Vec<AnimationTransition>, } impl Decodable for AnimationStateDef { fn decode<D: Decoder>(decoder: &mut D) -> Result<AnimationStateDef, D::Error> { decoder.read_struct("root", 0, |decoder| { let name = 
try!(decoder.read_struct_field("name", 0, |decoder| { Ok(try!(decoder.read_str())) })); let blend_tree = try!(decoder.read_struct_field("blend_tree", 0, Decodable::decode)); let transitions = try!(decoder.read_struct_field("transitions", 0, |decoder| { decoder.read_seq(|decoder, len| { let mut transitions = Vec::new(); for i in 0 .. len { transitions.push(try!(decoder.read_seq_elt(i, Decodable::decode))); } Ok(transitions) }) })); Ok(AnimationStateDef { name: name, blend_tree: blend_tree, transitions: transitions, }) }) } } /// A runtime representation of an Animation State Machine, consisting of one or more /// AnimationStates connected by AnimationTransitions, where the output animation /// pose depends on the current state or any active transitions between states. pub struct AnimationController<T: Transform> { /// Parameters that will be referenced by blend tree nodes and animation states parameters: HashMap<String, f32>, /// Shared reference to the skeleton this controller is using skeleton: Rc<Skeleton>, /// Tracks seconds since controller started running local_clock: f64, /// Playback speed multiplier. playback_speed: f64, /// Mapping of all animation state names to their instances states: HashMap<String, AnimationState<T>>, /// The name of the current active AnimationState current_state: String, /// The current active AnimationTransition and its start time, if any transition: Option<(f64, AnimationTransition)>, } impl<T: Transform> AnimationController<T> { /// Create an AnimationController instance from its definition, the desired skeleton, and a /// collection of currently loaded animation clips. 
pub fn new(controller_def: AnimationControllerDef, skeleton: Rc<Skeleton>, animations: &HashMap<ClipId, Rc<AnimationClip<T>>>) -> AnimationController<T> { let mut parameters = HashMap::new(); for parameter in controller_def.parameters.iter() { parameters.insert(parameter.clone(), 0.0); }; let mut states = HashMap::new(); for state_def in controller_def.states.iter() { let mut blend_tree = AnimBlendTree::from_def(state_def.blend_tree.clone(), animations, skeleton.clone()); blend_tree.synchronize(0.0, &parameters); states.insert(state_def.name.clone(), AnimationState { blend_tree: blend_tree, transitions: state_def.transitions.clone() }); } AnimationController { parameters: parameters, skeleton: skeleton.clone(), local_clock: 0.0, playback_speed: 1.0, states: states, current_state: controller_def.initial_state, transition: None, } } /// Update the controller's local clock with the given time delta pub fn update(&mut self, delta_time: f64) { self.local_clock += delta_time * self.playback_speed; } /// Checks if controller should transition to a different state, or if currently /// in a transition, checks if the transition is complete fn update_state(&mut self, ext_dt: f64) { match self.transition.clone() { Some((ref start_time, ref transition)) => { // If transition is finished, switch state to new transition if self.local_clock + ext_dt >= start_time + transition.duration as f64{ self.current_state = transition.target_state.clone(); self.transition = None; } }, None => { // Check for any transitions with passing conditions let current_state = &self.states[&self.current_state[..]]; for transition in current_state.transitions.iter() { if transition.condition.is_true(&self.parameters) { self.transition = Some((self.local_clock + ext_dt, transition.clone())); break; } } } } } /// Set the playback speed for the controller pub fn set_playback_speed(&mut self, speed: f64) { self.playback_speed = speed; } /// Set the value for the given controller parameter pub fn 
set_param_value(&mut self, name: &str, value: f32) { self.parameters.insert(name.to_string(), value); // :( } /// Return the value for the given controller parameter pub fn get_param_value(&self, name: &str) -> f32 { self.parameters[name] } /// Return a read-only reference to the controller parameter map pub fn get_parameters(&self) -> &HashMap<String, f32> { &self.parameters } /// Calculate global skeletal joint poses for the given time since last update pub fn get_output_pose<TOutput: Transform + FromTransform<T>>(&mut self, ext_dt: f64, output_poses: &mut [TOutput]) { self.update_state(ext_dt); let elapsed_time = self.local_clock + ext_dt * self.playback_speed; let mut local_poses = [ T::identity(); MAX_JOINTS ]; { let current_state = self.states.get_mut(&self.current_state[..]).unwrap(); current_state.blend_tree.synchronize(elapsed_time as f32, &self.parameters); current_state.blend_tree.get_output_pose(elapsed_time as f32, &self.parameters, &mut local_poses[..]); } if let Some((transition_start_time, ref transition)) = self.transition { // Blend with the target state ... let mut target_poses = [ T::identity(); MAX_JOINTS ]; let target_state = self.states.get_mut(&transition.target_state[..]).unwrap(); target_state.blend_tree.synchronize(elapsed_time as f32, &self.parameters); target_state.blend_tree.get_output_pose(elapsed_time as f32, &self.parameters, &mut target_poses[..]); let blend_parameter = ((self.local_clock + ext_dt - transition_start_time) / transition.duration as f64) as f32; for i in 0 .. 
output_poses.len() { let pose_1 = &mut local_poses[i]; let pose_2 = target_poses[i]; *pose_1 = pose_1.lerp(pose_2, blend_parameter); } } self.calculate_global_poses(&local_poses[..], output_poses); } /// Calculate global poses from the controller's skeleton and the given local poses fn calculate_global_poses<TOutput: Transform + FromTransform<T>>( &self, local_poses: &[T], global_poses: &mut [TOutput], ) { for (joint_index, joint) in self.skeleton.joints.iter().enumerate() { let parent_pose = if !joint.is_root() { global_poses[joint.parent_index as usize] } else { TOutput::identity() }; let local_pose = local_poses[joint_index]; global_poses[joint_index] = parent_pose.concat(TOutput::from_transform(local_pose)); } } }
35.696049
158
0.636325
d50a4e9c780955cca58fd0dd7ef26177b259975e
16,548
use crate::{ helpers::{is_fee_amount_packable, pack_fee_amount}, AccountId, Nonce, TxFeeTypes, }; use crate::account::PubKeyHash; use anyhow::ensure; use num::{BigUint, Zero}; use parity_crypto::Keccak256; use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, TokenId, H256}; use zksync_crypto::{ params::{max_account_id, max_token_id}, PrivateKey, }; use zksync_utils::{format_units, BigUintSerdeAsRadix10Str}; use super::{PackedEthSignature, TimeRange, TxSignature, VerifiedSignatureCache}; use crate::tokens::ChangePubKeyFeeTypeArg; #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Hash, Eq)] pub enum ChangePubKeyType { Onchain, ECDSA, CREATE2, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ChangePubKeyECDSAData { pub eth_signature: PackedEthSignature, #[serde(default)] pub batch_hash: H256, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ChangePubKeyCREATE2Data { pub creator_address: Address, pub salt_arg: H256, pub code_hash: H256, } impl ChangePubKeyCREATE2Data { pub fn get_address(&self, pubkey_hash: &PubKeyHash) -> Address { let salt = { let mut bytes = Vec::new(); bytes.extend_from_slice(self.salt_arg.as_bytes()); bytes.extend_from_slice(&pubkey_hash.data); bytes.keccak256() }; let mut bytes = Vec::new(); bytes.push(0xff); bytes.extend_from_slice(self.creator_address.as_bytes()); bytes.extend_from_slice(&salt); bytes.extend_from_slice(self.code_hash.as_bytes()); Address::from_slice(&bytes.keccak256()[12..]) } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] pub enum ChangePubKeyEthAuthData { Onchain, ECDSA(ChangePubKeyECDSAData), CREATE2(ChangePubKeyCREATE2Data), } impl ChangePubKeyEthAuthData { pub fn is_ecdsa(&self) -> bool { matches!(self, ChangePubKeyEthAuthData::ECDSA(..)) } pub fn is_onchain(&self) -> bool { matches!(self, ChangePubKeyEthAuthData::Onchain) } pub fn is_create2(&self) -> bool { matches!(self, 
ChangePubKeyEthAuthData::CREATE2(..)) } pub fn get_eth_witness(&self) -> Vec<u8> { match self { ChangePubKeyEthAuthData::Onchain => Vec::new(), ChangePubKeyEthAuthData::ECDSA(ChangePubKeyECDSAData { eth_signature, .. }) => { let mut bytes = Vec::new(); bytes.push(0x00); bytes.extend_from_slice(&eth_signature.serialize_packed()); // bytes.extend_from_slice(batch_hash.as_bytes()); bytes } ChangePubKeyEthAuthData::CREATE2(ChangePubKeyCREATE2Data { creator_address, salt_arg, code_hash, }) => { let mut bytes = Vec::new(); bytes.push(0x01); bytes.extend_from_slice(creator_address.as_bytes()); bytes.extend_from_slice(salt_arg.as_bytes()); bytes.extend_from_slice(code_hash.as_bytes()); bytes } } } pub fn get_fee_type(&self) -> ChangePubKeyType { match self { ChangePubKeyEthAuthData::Onchain => ChangePubKeyType::Onchain, ChangePubKeyEthAuthData::ECDSA(_) => ChangePubKeyType::ECDSA, ChangePubKeyEthAuthData::CREATE2(_) => ChangePubKeyType::CREATE2, } } } /// `ChangePubKey` transaction is used to set the owner's public key hash /// associated with the account. /// /// Without public key hash set, account is unable to execute any L2 transactions. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ChangePubKey { /// zkSync network account ID to apply operation to. pub account_id: AccountId, /// Address of the account. pub account: Address, /// Public key hash to set. pub new_pk_hash: PubKeyHash, /// Token to be used for fee. #[serde(default)] pub fee_token: TokenId, /// Fee for the transaction. #[serde(with = "BigUintSerdeAsRadix10Str", default)] pub fee: BigUint, /// Current account nonce. pub nonce: Nonce, /// Transaction zkSync signature. Must be signed with the key corresponding to the /// `new_pk_hash` value. This signature is required to ensure that `fee_token` and `fee` /// fields can't be changed by an attacker. #[serde(default)] pub signature: TxSignature, /// Transaction Ethereum signature. 
It may be `None` if `ChangePubKey` operation is authorized /// onchain, otherwise the message must be signed by the Ethereum private key corresponding /// to the account address. pub eth_signature: Option<PackedEthSignature>, /// Data needed to check if Ethereum address authorized ChangePubKey operation pub eth_auth_data: Option<ChangePubKeyEthAuthData>, /// Time range when the transaction is valid /// This fields must be Option<...> because of backward compatibility with first version of ZkSync #[serde(flatten)] pub time_range: Option<TimeRange>, #[serde(skip)] cached_signer: VerifiedSignatureCache, } impl ChangePubKey { /// Unique identifier of the transaction type in zkSync network. pub const TX_TYPE: u8 = 7; /// Creates transaction from all the required fields. /// /// While `signature` field is mandatory for new transactions, it may be `None` /// in some cases (e.g. when restoring the network state from the L1 contract data). #[allow(clippy::too_many_arguments)] pub fn new( account_id: AccountId, account: Address, new_pk_hash: PubKeyHash, fee_token: TokenId, fee: BigUint, nonce: Nonce, time_range: TimeRange, signature: Option<TxSignature>, eth_signature: Option<PackedEthSignature>, ) -> Self { // TODO: support CREATE2 (ZKS-452) let eth_auth_data = Some( eth_signature .map(|eth_signature| { ChangePubKeyEthAuthData::ECDSA(ChangePubKeyECDSAData { eth_signature, batch_hash: H256::zero(), }) }) .unwrap_or(ChangePubKeyEthAuthData::Onchain), ); let mut tx = Self { account_id, account, new_pk_hash, fee_token, fee, nonce, signature: signature.clone().unwrap_or_default(), eth_signature: None, eth_auth_data, cached_signer: VerifiedSignatureCache::NotCached, time_range: Some(time_range), }; if signature.is_some() { tx.cached_signer = VerifiedSignatureCache::Cached(tx.verify_signature()); } tx } /// Creates a signed transaction using private key and /// checks for the transaction correcteness. 
#[allow(clippy::too_many_arguments)] pub fn new_signed( account_id: AccountId, account: Address, new_pk_hash: PubKeyHash, fee_token: TokenId, fee: BigUint, nonce: Nonce, time_range: TimeRange, eth_signature: Option<PackedEthSignature>, private_key: &PrivateKey, ) -> Result<Self, anyhow::Error> { let mut tx = Self::new( account_id, account, new_pk_hash, fee_token, fee, nonce, time_range, None, eth_signature, ); tx.signature = TxSignature::sign_musig(private_key, &tx.get_bytes()); if !tx.check_correctness() { anyhow::bail!(crate::tx::TRANSACTION_SIGNATURE_ERROR); } Ok(tx) } /// Restores the `PubKeyHash` from the transaction signature. pub fn verify_signature(&self) -> Option<PubKeyHash> { if let VerifiedSignatureCache::Cached(cached_signer) = &self.cached_signer { *cached_signer } else { self.signature .verify_musig(&self.get_bytes()) .map(|pub_key| PubKeyHash::from_pubkey(&pub_key)) } } /// Encodes the transaction data as the byte sequence according to the zkSync protocol. pub fn get_bytes(&self) -> Vec<u8> { let mut out = Vec::new(); out.extend_from_slice(&[Self::TX_TYPE]); out.extend_from_slice(&self.account_id.to_be_bytes()); out.extend_from_slice(&self.account.as_bytes()); out.extend_from_slice(&self.new_pk_hash.data); out.extend_from_slice(&self.fee_token.to_be_bytes()); out.extend_from_slice(&pack_fee_amount(&self.fee)); out.extend_from_slice(&self.nonce.to_be_bytes()); if let Some(time_range) = &self.time_range { out.extend_from_slice(&time_range.to_be_bytes()); } out } /// Provides a message to be signed with the Ethereum private key. pub fn get_eth_signed_data(&self) -> Result<Vec<u8>, anyhow::Error> { // Fee data is not included into ETH signature input, since it would require // to either have more chunks in pubdata (if fee amount is unpacked), unpack // fee on contract (if fee amount is packed), or display non human-readable // amount in message (if fee amount is packed and is not unpacked on contract). 
        // Either of these options is either non user-friendly or increase cost of
        // operation. Instead, fee data is signed via zkSync signature, which is essentially
        // free. This signature will be verified in the circuit.
        //
        // Fixed wire size of the signed message; presumably 20-byte pubkey hash +
        // 4-byte nonce + 4-byte account id + 32-byte batch hash — TODO confirm field widths.
        const CHANGE_PUBKEY_SIGNATURE_LEN: usize = 60;
        let mut eth_signed_msg = Vec::with_capacity(CHANGE_PUBKEY_SIGNATURE_LEN);
        eth_signed_msg.extend_from_slice(&self.new_pk_hash.data);
        eth_signed_msg.extend_from_slice(&self.nonce.to_be_bytes());
        eth_signed_msg.extend_from_slice(&self.account_id.to_be_bytes());
        // In case this transaction is not part of a batch, we simply append zeros.
        if let Some(ChangePubKeyEthAuthData::ECDSA(ChangePubKeyECDSAData { batch_hash, .. })) =
            self.eth_auth_data
        {
            eth_signed_msg.extend_from_slice(batch_hash.as_bytes());
        } else {
            eth_signed_msg.extend_from_slice(H256::default().as_bytes());
        }
        // Guard against layout drift: the message must be exactly the advertised length.
        ensure!(
            eth_signed_msg.len() == CHANGE_PUBKEY_SIGNATURE_LEN,
            "Change pubkey signed message does not match in size: {}, expected: {}",
            eth_signed_msg.len(),
            CHANGE_PUBKEY_SIGNATURE_LEN
        );
        Ok(eth_signed_msg)
    }

    /// Provides an old message to be signed with the Ethereum private key.
    ///
    /// Unlike [`get_eth_signed_data`], this builds a human-readable text message
    /// (pre-contracts-v4 format) rather than a packed binary payload.
    pub fn get_old_eth_signed_data(&self) -> Result<Vec<u8>, anyhow::Error> {
        // Fee data is not included into ETH signature input, since it would require
        // to either have more chunks in pubdata (if fee amount is unpacked), unpack
        // fee on contract (if fee amount is packed), or display non human-readable
        // amount in message (if fee amount is packed and is not unpacked on contract).
        // Either of these options is either non user-friendly or increase cost of
        // operation. Instead, fee data is signed via zkSync signature, which is essentially
        // free. This signature will be verified in the circuit.
        //
        // 152 is the exact byte length of the fixed-format text assembled below.
        const CHANGE_PUBKEY_SIGNATURE_LEN: usize = 152;
        let mut eth_signed_msg = Vec::with_capacity(CHANGE_PUBKEY_SIGNATURE_LEN);
        eth_signed_msg.extend_from_slice(b"Register zkSync pubkey:\n\n");
        // NOTE: the trailing backslashes are string-literal line continuations —
        // the rendered message has no extra whitespace between the fields.
        eth_signed_msg.extend_from_slice(
            format!(
                "{pubkey}\n\
                 nonce: 0x{nonce}\n\
                 account id: 0x{account_id}\
                 \n\n",
                pubkey = hex::encode(&self.new_pk_hash.data).to_ascii_lowercase(),
                nonce = hex::encode(&self.nonce.to_be_bytes()).to_ascii_lowercase(),
                account_id = hex::encode(&self.account_id.to_be_bytes()).to_ascii_lowercase()
            )
            .as_bytes(),
        );
        eth_signed_msg.extend_from_slice(b"Only sign this message for a trusted client!");
        ensure!(
            eth_signed_msg.len() == CHANGE_PUBKEY_SIGNATURE_LEN,
            "Change pubkey signed message len is too big: {}, expected: {}",
            eth_signed_msg.len(),
            CHANGE_PUBKEY_SIGNATURE_LEN
        );
        Ok(eth_signed_msg)
    }

    /// Checks that the Ethereum-side authorization matches the account:
    /// - `Onchain`: accepted here; on-chain verification happens elsewhere.
    /// - `ECDSA`: the signature over [`get_eth_signed_data`] must recover `self.account`.
    /// - `CREATE2`: the derived CREATE2 address must equal `self.account`.
    /// - legacy `eth_signature`: verified against [`get_old_eth_signed_data`].
    /// - neither present: treated as valid (no ETH-side auth required).
    pub fn is_eth_auth_data_valid(&self) -> bool {
        if let Some(eth_auth_data) = &self.eth_auth_data {
            match eth_auth_data {
                ChangePubKeyEthAuthData::Onchain => true, // Should query Ethereum to check it
                ChangePubKeyEthAuthData::ECDSA(ChangePubKeyECDSAData { eth_signature, .. }) => {
                    let recovered_address = self
                        .get_eth_signed_data()
                        .ok()
                        .and_then(|msg| eth_signature.signature_recover_signer(&msg).ok());
                    recovered_address == Some(self.account)
                }
                ChangePubKeyEthAuthData::CREATE2(create2_data) => {
                    let create2_address = create2_data.get_address(&self.new_pk_hash);
                    create2_address == self.account
                }
            }
        } else if let Some(old_eth_signature) = &self.eth_signature {
            // Legacy (pre-contracts-v4) authorization path.
            let recovered_address = self
                .get_old_eth_signed_data()
                .ok()
                .and_then(|msg| old_eth_signature.signature_recover_signer(&msg).ok());
            recovered_address == Some(self.account)
        } else {
            true
        }
    }

    /// Verifies the transaction correctness:
    ///
    /// - Ethereum signature (if set) must correspond to the account address.
    /// - zkSync signature must correspond to the `new_pk_hash` field of the transaction.
    /// - `account_id` field must be within supported range.
    /// - `fee_token` field must be within supported range.
    /// - `fee` field must represent a packable value.
    pub fn check_correctness(&self) -> bool {
        self.is_eth_auth_data_valid()
            && self.verify_signature() == Some(self.new_pk_hash)
            && self.account_id <= max_account_id()
            && self.fee_token <= max_token_id()
            && is_fee_amount_packable(&self.fee)
            && self
                .time_range
                .map(|t| t.check_correctness())
                .unwrap_or(true)
    }

    /// True when authorization is via an ECDSA signature (new or legacy form).
    pub fn is_ecdsa(&self) -> bool {
        if let Some(auth_data) = &self.eth_auth_data {
            auth_data.is_ecdsa()
        } else {
            self.eth_signature.is_some()
        }
    }

    /// True when authorization is performed on-chain (or no legacy signature is set).
    pub fn is_onchain(&self) -> bool {
        if let Some(auth_data) = &self.eth_auth_data {
            auth_data.is_onchain()
        } else {
            self.eth_signature.is_none()
        }
    }

    /// Get part of the message that should be signed with Ethereum account key for the batch of transactions.
    /// The message for single `ChangePubKey` transaction is defined differently. The pattern is:
    ///
    /// Set signing key: {pubKeyHash}
    /// [Fee: {fee} {token}]
    ///
    /// Note that the second line is optional.
    pub fn get_ethereum_sign_message_part(&self, token_symbol: &str, decimals: u8) -> String {
        let mut message = format!(
            "Set signing key: {}",
            hex::encode(&self.new_pk_hash.data).to_ascii_lowercase()
        );
        if !self.fee.is_zero() {
            // Zero fees are omitted entirely to keep the message short.
            message.push_str(
                format!(
                    "\nFee: {fee} {token}",
                    fee = format_units(&self.fee, decimals),
                    token = token_symbol,
                )
                .as_str(),
            );
        }
        message
    }

    /// Maps the auth data onto the fee-type argument for fee calculation.
    pub fn get_change_pubkey_fee_type(&self) -> ChangePubKeyFeeTypeArg {
        if let Some(auth_data) = &self.eth_auth_data {
            ChangePubKeyFeeTypeArg::ContractsV4Version(auth_data.get_fee_type())
        } else {
            // NOTE(review): in this branch `eth_auth_data` is known to be `None`,
            // so `is_none()` is always `true`. Possibly `self.eth_signature.is_none()`
            // was intended here — confirm against the fee-calculation callers.
            ChangePubKeyFeeTypeArg::PreContracts4Version {
                onchain_pubkey_auth: self.eth_auth_data.is_none(),
            }
        }
    }

    /// Wraps [`get_change_pubkey_fee_type`] into the generic [`TxFeeTypes`] enum.
    pub fn get_fee_type(&self) -> TxFeeTypes {
        TxFeeTypes::ChangePubKey(self.get_change_pubkey_fee_type())
    }
}
38.041379
110
0.613125
f99be73243fd045bf906bc3ed93fa6ef50508870
6,447
// SPDX-License-Identifier: MIT

use crate::{
    traits::{Emitable, Parseable},
    DecodeError,
};

// Wire size of the kernel's IPv6 statistics payload: 36 counters x 8 bytes = 288,
// matching the fixed offsets declared in the `buffer!` invocation below.
pub const INET6_STATS_LEN: usize = 288;

// Fixed-offset accessors over the raw 288-byte payload; each accessor reads or
// writes one 8-byte `i64` counter at its declared byte range (encoding/decoding
// is supplied by the `buffer!` macro, defined elsewhere in this crate).
buffer!(Inet6StatsBuffer(INET6_STATS_LEN) {
    num: (i64, 0..8),
    in_pkts: (i64, 8..16),
    in_octets: (i64, 16..24),
    in_delivers: (i64, 24..32),
    out_forw_datagrams: (i64, 32..40),
    out_pkts: (i64, 40..48),
    out_octets: (i64, 48..56),
    in_hdr_errors: (i64, 56..64),
    in_too_big_errors: (i64, 64..72),
    in_no_routes: (i64, 72..80),
    in_addr_errors: (i64, 80..88),
    in_unknown_protos: (i64, 88..96),
    in_truncated_pkts: (i64, 96..104),
    in_discards: (i64, 104..112),
    out_discards: (i64, 112..120),
    out_no_routes: (i64, 120..128),
    reasm_timeout: (i64, 128..136),
    reasm_reqds: (i64, 136..144),
    reasm_oks: (i64, 144..152),
    reasm_fails: (i64, 152..160),
    frag_oks: (i64, 160..168),
    frag_fails: (i64, 168..176),
    frag_creates: (i64, 176..184),
    in_mcast_pkts: (i64, 184..192),
    out_mcast_pkts: (i64, 192..200),
    in_bcast_pkts: (i64, 200..208),
    out_bcast_pkts: (i64, 208..216),
    in_mcast_octets: (i64, 216..224),
    out_mcast_octets: (i64, 224..232),
    in_bcast_octets: (i64, 232..240),
    out_bcast_octets: (i64, 240..248),
    in_csum_errors: (i64, 248..256),
    in_no_ect_pkts: (i64, 256..264),
    in_ect1_pkts: (i64, 264..272),
    in_ect0_pkts: (i64, 272..280),
    in_ce_pkts: (i64, 280..288),
});

/// Parsed, owned view of the IPv6 statistics payload. Field order mirrors the
/// buffer layout above exactly.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct Inet6Stats {
    pub num: i64,
    pub in_pkts: i64,
    pub in_octets: i64,
    pub in_delivers: i64,
    pub out_forw_datagrams: i64,
    pub out_pkts: i64,
    pub out_octets: i64,
    pub in_hdr_errors: i64,
    pub in_too_big_errors: i64,
    pub in_no_routes: i64,
    pub in_addr_errors: i64,
    pub in_unknown_protos: i64,
    pub in_truncated_pkts: i64,
    pub in_discards: i64,
    pub out_discards: i64,
    pub out_no_routes: i64,
    pub reasm_timeout: i64,
    pub reasm_reqds: i64,
    pub reasm_oks: i64,
    pub reasm_fails: i64,
    pub frag_oks: i64,
    pub frag_fails: i64,
    pub frag_creates: i64,
    pub in_mcast_pkts: i64,
    pub out_mcast_pkts: i64,
    pub in_bcast_pkts: i64,
    pub out_bcast_pkts: i64,
    pub in_mcast_octets: i64,
    pub out_mcast_octets: i64,
    pub in_bcast_octets: i64,
    pub out_bcast_octets: i64,
    pub in_csum_errors: i64,
    pub in_no_ect_pkts: i64,
    pub in_ect1_pkts: i64,
    pub in_ect0_pkts: i64,
    pub in_ce_pkts: i64,
}

// Decoding: copy every counter out of the buffer, field by field.
impl<T: AsRef<[u8]>> Parseable<Inet6StatsBuffer<T>> for Inet6Stats {
    fn parse(buf: &Inet6StatsBuffer<T>) -> Result<Self, DecodeError> {
        Ok(Self {
            num: buf.num(),
            in_pkts: buf.in_pkts(),
            in_octets: buf.in_octets(),
            in_delivers: buf.in_delivers(),
            out_forw_datagrams: buf.out_forw_datagrams(),
            out_pkts: buf.out_pkts(),
            out_octets: buf.out_octets(),
            in_hdr_errors: buf.in_hdr_errors(),
            in_too_big_errors: buf.in_too_big_errors(),
            in_no_routes: buf.in_no_routes(),
            in_addr_errors: buf.in_addr_errors(),
            in_unknown_protos: buf.in_unknown_protos(),
            in_truncated_pkts: buf.in_truncated_pkts(),
            in_discards: buf.in_discards(),
            out_discards: buf.out_discards(),
            out_no_routes: buf.out_no_routes(),
            reasm_timeout: buf.reasm_timeout(),
            reasm_reqds: buf.reasm_reqds(),
            reasm_oks: buf.reasm_oks(),
            reasm_fails: buf.reasm_fails(),
            frag_oks: buf.frag_oks(),
            frag_fails: buf.frag_fails(),
            frag_creates: buf.frag_creates(),
            in_mcast_pkts: buf.in_mcast_pkts(),
            out_mcast_pkts: buf.out_mcast_pkts(),
            in_bcast_pkts: buf.in_bcast_pkts(),
            out_bcast_pkts: buf.out_bcast_pkts(),
            in_mcast_octets: buf.in_mcast_octets(),
            out_mcast_octets: buf.out_mcast_octets(),
            in_bcast_octets: buf.in_bcast_octets(),
            out_bcast_octets: buf.out_bcast_octets(),
            in_csum_errors: buf.in_csum_errors(),
            in_no_ect_pkts: buf.in_no_ect_pkts(),
            in_ect1_pkts: buf.in_ect1_pkts(),
            in_ect0_pkts: buf.in_ect0_pkts(),
            in_ce_pkts: buf.in_ce_pkts(),
        })
    }
}

// Encoding: the exact inverse of `parse` — every field is written back to its
// fixed offset; `buffer` must be at least `INET6_STATS_LEN` bytes.
impl Emitable for Inet6Stats {
    fn buffer_len(&self) -> usize {
        INET6_STATS_LEN
    }

    fn emit(&self, buffer: &mut [u8]) {
        let mut buffer = Inet6StatsBuffer::new(buffer);
        buffer.set_num(self.num);
        buffer.set_in_pkts(self.in_pkts);
        buffer.set_in_octets(self.in_octets);
        buffer.set_in_delivers(self.in_delivers);
        buffer.set_out_forw_datagrams(self.out_forw_datagrams);
        buffer.set_out_pkts(self.out_pkts);
        buffer.set_out_octets(self.out_octets);
        buffer.set_in_hdr_errors(self.in_hdr_errors);
        buffer.set_in_too_big_errors(self.in_too_big_errors);
        buffer.set_in_no_routes(self.in_no_routes);
        buffer.set_in_addr_errors(self.in_addr_errors);
        buffer.set_in_unknown_protos(self.in_unknown_protos);
        buffer.set_in_truncated_pkts(self.in_truncated_pkts);
        buffer.set_in_discards(self.in_discards);
        buffer.set_out_discards(self.out_discards);
        buffer.set_out_no_routes(self.out_no_routes);
        buffer.set_reasm_timeout(self.reasm_timeout);
        buffer.set_reasm_reqds(self.reasm_reqds);
        buffer.set_reasm_oks(self.reasm_oks);
        buffer.set_reasm_fails(self.reasm_fails);
        buffer.set_frag_oks(self.frag_oks);
        buffer.set_frag_fails(self.frag_fails);
        buffer.set_frag_creates(self.frag_creates);
        buffer.set_in_mcast_pkts(self.in_mcast_pkts);
        buffer.set_out_mcast_pkts(self.out_mcast_pkts);
        buffer.set_in_bcast_pkts(self.in_bcast_pkts);
        buffer.set_out_bcast_pkts(self.out_bcast_pkts);
        buffer.set_in_mcast_octets(self.in_mcast_octets);
        buffer.set_out_mcast_octets(self.out_mcast_octets);
        buffer.set_in_bcast_octets(self.in_bcast_octets);
        buffer.set_out_bcast_octets(self.out_bcast_octets);
        buffer.set_in_csum_errors(self.in_csum_errors);
        buffer.set_in_no_ect_pkts(self.in_no_ect_pkts);
        buffer.set_in_ect1_pkts(self.in_ect1_pkts);
        buffer.set_in_ect0_pkts(self.in_ect0_pkts);
        buffer.set_in_ce_pkts(self.in_ce_pkts);
    }
}
36.630682
70
0.650845
71fc54aeb24ce1c7a1d97837cefc69d065e9a0e4
16,336
use crate::prelude::{Children, Parent, PreviousParent};
use bevy_ecs::{
    bundle::Bundle,
    entity::Entity,
    system::{Command, Commands, EntityCommands},
    world::{EntityMut, World},
};
use smallvec::SmallVec;

/// Deferred command: splice `children` into `parent`'s [`Children`] list at `index`,
/// re-parenting each child to `parent`.
#[derive(Debug)]
pub struct InsertChildren {
    parent: Entity,
    children: SmallVec<[Entity; 8]>,
    index: usize,
}

impl Command for InsertChildren {
    fn write(self, world: &mut World) {
        // First point every child at the new parent...
        for child in self.children.iter() {
            world
                .entity_mut(*child)
                // FIXME: don't erase the previous parent (see #1545)
                .insert_bundle((Parent(self.parent), PreviousParent(self.parent)));
        }
        // ...then record them on the parent, creating `Children` if absent.
        {
            if let Some(mut children) = world.get_mut::<Children>(self.parent) {
                children.0.insert_from_slice(self.index, &self.children);
            } else {
                world
                    .entity_mut(self.parent)
                    .insert(Children(self.children));
            }
        }
    }
}

/// Deferred command: append `children` to the end of `parent`'s [`Children`] list.
#[derive(Debug)]
pub struct PushChildren {
    parent: Entity,
    children: SmallVec<[Entity; 8]>,
}

/// Command-based builder: collects spawned entities and queues a single
/// [`PushChildren`] when the `with_children` closure returns.
pub struct ChildBuilder<'a, 'b> {
    commands: &'b mut Commands<'a>,
    push_children: PushChildren,
}

impl Command for PushChildren {
    fn write(self, world: &mut World) {
        // Re-parent each child before touching the parent's `Children` list.
        for child in self.children.iter() {
            world
                .entity_mut(*child)
                // FIXME: don't erase the previous parent (see #1545)
                .insert_bundle((Parent(self.parent), PreviousParent(self.parent)));
        }
        {
            let mut added = false;
            if let Some(mut children) = world.get_mut::<Children>(self.parent) {
                children.0.extend(self.children.iter().cloned());
                added = true;
            }
            // NOTE: ideally this is just an else statement, but currently that _incorrectly_ fails
            // borrow-checking
            if !added {
                world
                    .entity_mut(self.parent)
                    .insert(Children(self.children));
            }
        }
    }
}

impl<'a, 'b> ChildBuilder<'a, 'b> {
    /// Spawns an entity with `bundle` and registers it as a child of this builder's parent.
    pub fn spawn_bundle(&mut self, bundle: impl Bundle) -> EntityCommands<'a, '_> {
        let e = self.commands.spawn_bundle(bundle);
        self.push_children.children.push(e.id());
        e
    }

    /// Spawns an empty entity and registers it as a child of this builder's parent.
    pub fn spawn(&mut self) -> EntityCommands<'a, '_> {
        let e = self.commands.spawn();
        self.push_children.children.push(e.id());
        e
    }

    /// The entity that children built here will be attached to.
    pub fn parent_entity(&self) -> Entity {
        self.push_children.parent
    }

    /// Queues an arbitrary command on the underlying [`Commands`].
    pub fn add_command<C: Command + 'static>(&mut self, command: C) -> &mut Self {
        self.commands.add(command);
        self
    }
}

/// Command-queue flavour of hierarchy building (applied when the queue flushes).
pub trait BuildChildren {
    fn with_children(&mut self, f: impl FnOnce(&mut ChildBuilder)) -> &mut Self;
    fn push_children(&mut self, children: &[Entity]) -> &mut Self;
    fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self;
}

impl<'a, 'b> BuildChildren for EntityCommands<'a, 'b> {
    fn with_children(&mut self, spawn_children: impl FnOnce(&mut ChildBuilder)) -> &mut Self {
        let parent = self.id();
        // Run the user closure against a scratch builder, then queue one command
        // containing everything it spawned.
        let push_children = {
            let mut builder = ChildBuilder {
                commands: self.commands(),
                push_children: PushChildren {
                    children: SmallVec::default(),
                    parent,
                },
            };
            spawn_children(&mut builder);
            builder.push_children
        };
        self.commands().add(push_children);
        self
    }

    fn push_children(&mut self, children: &[Entity]) -> &mut Self {
        let parent = self.id();
        self.commands().add(PushChildren {
            children: SmallVec::from(children),
            parent,
        });
        self
    }

    fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self {
        let parent = self.id();
        self.commands().add(InsertChildren {
            children: SmallVec::from(children),
            index,
            parent,
        });
        self
    }
}

/// Direct-`World` builder: mutates the hierarchy immediately (no command queue).
/// `parent_entities` is a stack so `with_children` calls can nest.
#[derive(Debug)]
pub struct WorldChildBuilder<'w> {
    world: &'w mut World,
    current_entity: Option<Entity>,
    parent_entities: Vec<Entity>,
}

impl<'w> WorldChildBuilder<'w> {
    /// Spawns an entity with `bundle`, parents it to the current parent, and
    /// appends it to that parent's `Children` (creating the component if needed).
    pub fn spawn_bundle(&mut self, bundle: impl Bundle + Send + Sync + 'static) -> EntityMut<'_> {
        let parent_entity = self.parent_entity();
        let entity = self
            .world
            .spawn()
            .insert_bundle(bundle)
            .insert_bundle((Parent(parent_entity), PreviousParent(parent_entity)))
            .id();
        self.current_entity = Some(entity);
        if let Some(mut parent) = self.world.get_entity_mut(parent_entity) {
            if let Some(mut children) = parent.get_mut::<Children>() {
                children.0.push(entity);
            } else {
                parent.insert(Children(smallvec::smallvec![entity]));
            }
        }
        self.world.entity_mut(entity)
    }

    /// Same as [`spawn_bundle`] but spawns with only the hierarchy components.
    pub fn spawn(&mut self) -> EntityMut<'_> {
        let parent_entity = self.parent_entity();
        let entity = self
            .world
            .spawn()
            .insert_bundle((Parent(parent_entity), PreviousParent(parent_entity)))
            .id();
        self.current_entity = Some(entity);
        if let Some(mut parent) = self.world.get_entity_mut(parent_entity) {
            if let Some(mut children) = parent.get_mut::<Children>() {
                children.0.push(entity);
            } else {
                parent.insert(Children(smallvec::smallvec![entity]));
            }
        }
        self.world.entity_mut(entity)
    }

    /// Top of the parent stack; panics if called outside a `with_children` scope.
    pub fn parent_entity(&self) -> Entity {
        self.parent_entities
            .last()
            .cloned()
            .expect("There should always be a parent at this point.")
    }
}

/// Direct-`World` flavour of hierarchy building (immediate effect).
pub trait BuildWorldChildren {
    fn with_children(&mut self, spawn_children: impl FnOnce(&mut WorldChildBuilder)) -> &mut Self;
    fn push_children(&mut self, children: &[Entity]) -> &mut Self;
    fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self;
}

impl<'w> BuildWorldChildren for EntityMut<'w> {
    fn with_children(&mut self, spawn_children: impl FnOnce(&mut WorldChildBuilder)) -> &mut Self {
        {
            let entity = self.id();
            let mut builder = WorldChildBuilder {
                current_entity: None,
                parent_entities: vec![entity],
                // SAFE: self.update_location() is called below. It is impossible to make EntityMut
                // function calls on `self` within the scope defined here
                world: unsafe { self.world_mut() },
            };
            spawn_children(&mut builder);
        }
        self.update_location();
        self
    }

    fn push_children(&mut self, children: &[Entity]) -> &mut Self {
        let parent = self.id();
        {
            // SAFE: parent entity is not modified and its location is updated manually
            let world = unsafe { self.world_mut() };
            for child in children.iter() {
                world
                    .entity_mut(*child)
                    // FIXME: don't erase the previous parent (see #1545)
                    .insert_bundle((Parent(parent), PreviousParent(parent)));
            }
            // Inserting a bundle in the children entities may change the parent entity's location if they were of the same archetype
            self.update_location();
        }
        if let Some(mut children_component) = self.get_mut::<Children>() {
            children_component.0.extend(children.iter().cloned());
        } else {
            self.insert(Children::with(children));
        }
        self
    }

    fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self {
        let parent = self.id();
        {
            // SAFE: parent entity is not modified and its location is updated manually
            let world = unsafe { self.world_mut() };
            for child in children.iter() {
                world
                    .entity_mut(*child)
                    // FIXME: don't erase the previous parent (see #1545)
                    .insert_bundle((Parent(parent), PreviousParent(parent)));
            }
            // Inserting a bundle in the children entities may change the parent entity's location if they were of the same archetype
            self.update_location();
        }
        if let Some(mut children_component) = self.get_mut::<Children>() {
            children_component.0.insert_from_slice(index, children);
        } else {
            self.insert(Children::with(children));
        }
        self
    }
}

impl<'w> BuildWorldChildren for WorldChildBuilder<'w> {
    fn with_children(
        &mut self,
        spawn_children: impl FnOnce(&mut WorldChildBuilder<'w>),
    ) -> &mut Self {
        // Push the current entity as the new parent for the nested scope, then
        // restore it afterwards (stack discipline).
        let current_entity = self
            .current_entity
            .expect("Cannot add children without a parent. Try creating an entity first.");
        self.parent_entities.push(current_entity);
        self.current_entity = None;
        spawn_children(self);
        self.current_entity = self.parent_entities.pop();
        self
    }

    fn push_children(&mut self, children: &[Entity]) -> &mut Self {
        let parent = self
            .current_entity
            .expect("Cannot add children without a parent. Try creating an entity first.");
        for child in children.iter() {
            self.world
                .entity_mut(*child)
                // FIXME: don't erase the previous parent (see #1545)
                .insert_bundle((Parent(parent), PreviousParent(parent)));
        }
        if let Some(mut children_component) = self.world.get_mut::<Children>(parent) {
            children_component.0.extend(children.iter().cloned());
        } else {
            self.world
                .entity_mut(parent)
                .insert(Children::with(children));
        }
        self
    }

    fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self {
        let parent = self
            .current_entity
            .expect("Cannot add children without a parent. Try creating an entity first.");
        for child in children.iter() {
            self.world
                .entity_mut(*child)
                // FIXME: don't erase the previous parent (see #1545)
                .insert_bundle((Parent(parent), PreviousParent(parent)));
        }
        if let Some(mut children_component) = self.world.get_mut::<Children>(parent) {
            children_component.0.insert_from_slice(index, children);
        } else {
            self.world
                .entity_mut(parent)
                .insert(Children::with(children));
        }
        self
    }
}

#[cfg(test)]
mod tests {
    use super::{BuildChildren, BuildWorldChildren};
    use crate::prelude::{Children, Parent, PreviousParent};
    use bevy_ecs::{
        entity::Entity,
        system::{CommandQueue, Commands},
        world::World,
    };
    use smallvec::{smallvec, SmallVec};

    #[test]
    fn build_children() {
        let mut world = World::default();
        let mut queue = CommandQueue::default();
        let mut commands = Commands::new(&mut queue, &world);

        let mut children = Vec::new();
        let parent = commands.spawn().insert(1).id();
        commands.entity(parent).with_children(|parent| {
            children.push(parent.spawn().insert(2).id());
            children.push(parent.spawn().insert(3).id());
            children.push(parent.spawn().insert(4).id());
        });

        queue.apply(&mut world);
        assert_eq!(
            world.get::<Children>(parent).unwrap().0.as_slice(),
            children.as_slice(),
        );
        assert_eq!(*world.get::<Parent>(children[0]).unwrap(), Parent(parent));
        assert_eq!(*world.get::<Parent>(children[1]).unwrap(), Parent(parent));

        assert_eq!(
            *world.get::<PreviousParent>(children[0]).unwrap(),
            PreviousParent(parent)
        );
        assert_eq!(
            *world.get::<PreviousParent>(children[1]).unwrap(),
            PreviousParent(parent)
        );
    }

    #[test]
    fn push_and_insert_children_commands() {
        let mut world = World::default();

        let entities = world
            .spawn_batch(vec![(1,), (2,), (3,), (4,), (5,)])
            .collect::<Vec<Entity>>();

        let mut queue = CommandQueue::default();
        {
            let mut commands = Commands::new(&mut queue, &world);
            commands.entity(entities[0]).push_children(&entities[1..3]);
        }
        queue.apply(&mut world);

        let parent = entities[0];
        let child1 = entities[1];
        let child2 = entities[2];
        let child3 = entities[3];
        let child4 = entities[4];

        let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2];
        assert_eq!(
            world.get::<Children>(parent).unwrap().0.clone(),
            expected_children
        );
        assert_eq!(*world.get::<Parent>(child1).unwrap(), Parent(parent));
        assert_eq!(*world.get::<Parent>(child2).unwrap(), Parent(parent));

        assert_eq!(
            *world.get::<PreviousParent>(child1).unwrap(),
            PreviousParent(parent)
        );
        assert_eq!(
            *world.get::<PreviousParent>(child2).unwrap(),
            PreviousParent(parent)
        );

        {
            let mut commands = Commands::new(&mut queue, &world);
            commands.entity(parent).insert_children(1, &entities[3..]);
        }
        queue.apply(&mut world);

        let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child3, child4, child2];
        assert_eq!(
            world.get::<Children>(parent).unwrap().0.clone(),
            expected_children
        );
        assert_eq!(*world.get::<Parent>(child3).unwrap(), Parent(parent));
        assert_eq!(*world.get::<Parent>(child4).unwrap(), Parent(parent));
        assert_eq!(
            *world.get::<PreviousParent>(child3).unwrap(),
            PreviousParent(parent)
        );
        assert_eq!(
            *world.get::<PreviousParent>(child4).unwrap(),
            PreviousParent(parent)
        );
    }

    #[test]
    fn push_and_insert_children_world() {
        let mut world = World::default();

        let entities = world
            .spawn_batch(vec![(1,), (2,), (3,), (4,), (5,)])
            .collect::<Vec<Entity>>();

        world.entity_mut(entities[0]).push_children(&entities[1..3]);

        let parent = entities[0];
        let child1 = entities[1];
        let child2 = entities[2];
        let child3 = entities[3];
        let child4 = entities[4];

        let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2];
        assert_eq!(
            world.get::<Children>(parent).unwrap().0.clone(),
            expected_children
        );
        assert_eq!(*world.get::<Parent>(child1).unwrap(), Parent(parent));
        assert_eq!(*world.get::<Parent>(child2).unwrap(), Parent(parent));

        assert_eq!(
            *world.get::<PreviousParent>(child1).unwrap(),
            PreviousParent(parent)
        );
        assert_eq!(
            *world.get::<PreviousParent>(child2).unwrap(),
            PreviousParent(parent)
        );

        world.entity_mut(parent).insert_children(1, &entities[3..]);

        let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child3, child4, child2];
        assert_eq!(
            world.get::<Children>(parent).unwrap().0.clone(),
            expected_children
        );
        assert_eq!(*world.get::<Parent>(child3).unwrap(), Parent(parent));
        assert_eq!(*world.get::<Parent>(child4).unwrap(), Parent(parent));
        assert_eq!(
            *world.get::<PreviousParent>(child3).unwrap(),
            PreviousParent(parent)
        );
        assert_eq!(
            *world.get::<PreviousParent>(child4).unwrap(),
            PreviousParent(parent)
        );
    }

    #[test]
    fn regression_push_children_same_archetype() {
        let mut world = World::new();
        let child = world.spawn().id();
        world.spawn().push_children(&[child]);
    }
}
33.613169
133
0.555338
0e7ef413f0288e7377f1a12a16d548fd6b0374ef
3,128
// svd2rust-style generated register accessors for LPSPI3_SDI_SELECT_INPUT
// (single 1-bit DAISY field selecting the SDI input pad). Do not hand-edit
// semantics — the #[doc] attributes below come from the SVD description.
#[doc = "Reader of register LPSPI3_SDI_SELECT_INPUT"]
pub type R = crate::R<u32, super::LPSPI3_SDI_SELECT_INPUT>;
#[doc = "Writer for register LPSPI3_SDI_SELECT_INPUT"]
pub type W = crate::W<u32, super::LPSPI3_SDI_SELECT_INPUT>;
#[doc = "Register LPSPI3_SDI_SELECT_INPUT `reset()`'s with value 0"]
impl crate::ResetValue for super::LPSPI3_SDI_SELECT_INPUT {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Selecting Pads Involved in Daisy Chain.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DAISY_A {
    #[doc = "0: Selecting Pad: GPIO_AD_B0_02 for Mode: ALT7"]
    GPIO_AD_B0_02_ALT7 = 0,
    #[doc = "1: Selecting Pad: GPIO_AD_B1_13 for Mode: ALT2"]
    GPIO_AD_B1_13_ALT2 = 1,
}
impl From<DAISY_A> for bool {
    #[inline(always)]
    fn from(variant: DAISY_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `DAISY`"]
pub type DAISY_R = crate::R<bool, DAISY_A>;
impl DAISY_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DAISY_A {
        match self.bits {
            false => DAISY_A::GPIO_AD_B0_02_ALT7,
            true => DAISY_A::GPIO_AD_B1_13_ALT2,
        }
    }
    #[doc = "Checks if the value of the field is `GPIO_AD_B0_02_ALT7`"]
    #[inline(always)]
    pub fn is_gpio_ad_b0_02_alt7(&self) -> bool {
        *self == DAISY_A::GPIO_AD_B0_02_ALT7
    }
    #[doc = "Checks if the value of the field is `GPIO_AD_B1_13_ALT2`"]
    #[inline(always)]
    pub fn is_gpio_ad_b1_13_alt2(&self) -> bool {
        *self == DAISY_A::GPIO_AD_B1_13_ALT2
    }
}
#[doc = "Write proxy for field `DAISY`"]
pub struct DAISY_W<'a> {
    w: &'a mut W,
}
impl<'a> DAISY_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DAISY_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Selecting Pad: GPIO_AD_B0_02 for Mode: ALT7"]
    #[inline(always)]
    pub fn gpio_ad_b0_02_alt7(self) -> &'a mut W {
        self.variant(DAISY_A::GPIO_AD_B0_02_ALT7)
    }
    #[doc = "Selecting Pad: GPIO_AD_B1_13 for Mode: ALT2"]
    #[inline(always)]
    pub fn gpio_ad_b1_13_alt2(self) -> &'a mut W {
        self.variant(DAISY_A::GPIO_AD_B1_13_ALT2)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 0 only; all other register bits preserved.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
impl R {
    #[doc = "Bit 0 - Selecting Pads Involved in Daisy Chain."]
    #[inline(always)]
    pub fn daisy(&self) -> DAISY_R {
        DAISY_R::new((self.bits & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Selecting Pads Involved in Daisy Chain."]
    #[inline(always)]
    pub fn daisy(&mut self) -> DAISY_W {
        DAISY_W { w: self }
    }
}
30.666667
71
0.598785
ff8fb621e9d4f57b12817638ef7985216f39dea5
2,019
use crate::error::Error; use crate::{utils, NodeInfo, Peer}; use std::fmt; use std::net::SocketAddr; use utils::Token; /// QuicP2p Events to the user #[derive(Debug)] pub enum Event { /// Network bootstrap failed. BootstrapFailure, /// Bootstrap connection to this node was successful. BootstrappedTo { /// Node information. node: NodeInfo, }, /// Connection to this peer failed. ConnectionFailure { /// Peer address. peer_addr: SocketAddr, /// Error explaining connection failure. err: Error, }, /// The given message was successfully sent to this peer. SentUserMessage { /// Peer address. peer_addr: SocketAddr, /// Sent message. msg: bytes::Bytes, /// Token, originally given by the user, for context. token: Token, }, /// The given message was not sent to this peer. UnsentUserMessage { /// Peer address. peer_addr: SocketAddr, /// Unsent message. msg: bytes::Bytes, /// Token, originally given by the user, for context. token: Token, }, /// Successfully connected to this peer. ConnectedTo { /// Peer information. peer: Peer, }, /// A new message was received from this peer. NewMessage { /// Sending peer address. peer_addr: SocketAddr, /// The new message. msg: bytes::Bytes, }, /// No more messages will be fired after this // TODO Currently used only for testing Finish, } impl fmt::Display for Event { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Event::NewMessage { ref peer_addr, ref msg, } => write!( f, "Event::NewMessage {{ peer_addr: {}, msg: {} }}", peer_addr, utils::bin_data_format(&*msg) ), _ => write!(f, "TODO"), } } }
26.92
65
0.544329
186440f86c2ecaf26add21dfb3f92735df31f892
5,354
use mongodb::Database;
use bson::{Document, doc};
use crate::{db::{prelude::*, mongo}, model::password::Password, utils::context::ServiceContext, utils::errors::{ErrorCode, VaultError}};

///
/// Load the requested password from the database.
///
/// Returns `ErrorCode::PasswordNotFound` when no document matches `password_id`.
///
#[tracing::instrument(name="db:load", skip(db))]
pub async fn load(password_id: &str, db: &Database) -> Result<Password, VaultError> {
    let filter = doc!{ PASSWORD_ID: password_id };

    match db.collection::<Password>(PASSWORDS).find_one(filter, None)
        .await
        .map_err(VaultError::from)?
    {
        Some(password) => Ok(password),
        None => Err(ErrorCode::PasswordNotFound.with_msg("The password requested does not exist"))
    }
}

///
/// Load the requested password from the database - if it exists.
///
/// Same lookup as [`load`] but a missing document is `Ok(None)` rather than an error.
///
#[tracing::instrument(name="db:load_if_present", skip(db))]
pub async fn load_if_present(password_id: &str, db: &Database) -> Result<Option<Password>, VaultError> {
    let filter = doc!{ PASSWORD_ID: password_id };

    db.collection::<Password>(PASSWORDS).find_one(filter, None)
        .await
        .map_err(VaultError::from)
}

///
/// Create or update the password specified.
///
/// Upserting also clears any failure/reset state (`$unset`) and appends the new
/// phc to the bounded history array.
///
#[tracing::instrument(name="db:upsert", skip(ctx, phc))]
pub async fn upsert(ctx: &ServiceContext, password_id: &str, password_type: &str, phc: &str, max_history: u32)
    -> Result<(), VaultError> {

    let filter = doc! {
        PASSWORD_ID: password_id,
    };

    // Note: The $push below appends the password to the end of history, but only keeps the last x
    // phcs in the history array.
    let update = doc!{
        "$unset": { FAILURE_COUNT: "", FIRST_FAILURE: "" , RESET_CODE: "", RESET_STARTED_AT: "" },
        "$set": {
            PASSWORD_ID: password_id,
            PASSWORD_TYPE: password_type,
            PHC: phc,
            CHANGED_ON: bson::DateTime::from_chrono(ctx.now()),
        },
        "$push": {
            HISTORY: {
                "$each": [ phc ],
                "$slice": -(max_history as i32)
            }
        }
    };

    ctx.db().collection::<Document>(PASSWORDS).update_one(filter, update, mongo::upsert())
        .await
        .map_err(VaultError::from)?;

    Ok(())
}

// Deletes a single password document; returns the number of documents removed (0 or 1).
#[tracing::instrument(name="db:delete", skip(db))]
pub async fn delete(password_id: &str, db: &Database) -> Result<u64, VaultError> {
    let filter = doc! {
        PASSWORD_ID: password_id,
    };

    let result = db.collection::<Document>(PASSWORDS).delete_one(filter, None)
        .await
        .map_err(VaultError::from)?;

    Ok(result.deleted_count)
}

// Deletes every password document of the given type; returns how many were removed.
#[tracing::instrument(name="db:delete_by_type", skip(db))]
pub async fn delete_by_type(password_type: &str, db: &Database) -> Result<u64, VaultError> {
    let filter = doc! {
        PASSWORD_TYPE: password_type,
    };

    let result = db.collection::<Document>(PASSWORDS).delete_many(filter, None)
        .await
        .map_err(VaultError::from)?;

    Ok(result.deleted_count)
}

// Stores a password-reset code plus the timestamp the reset started.
// NOTE: no error is raised if `password_id` matches nothing — the update is a no-op then.
#[tracing::instrument(name="db:store_reset_code", skip(reset_code, ctx))]
pub async fn store_reset_code(password_id: &str, reset_code: &str, ctx: &ServiceContext) -> Result<(), VaultError> {
    let filter = doc! {
        PASSWORD_ID: password_id,
    };

    let update = doc!{
        "$set": {
            RESET_CODE: reset_code,
            RESET_STARTED_AT: bson::DateTime::from_chrono(ctx.now()),
        }
    };

    ctx.db().collection::<Document>(PASSWORDS).update_one(filter, update, None)
        .await
        .map_err(VaultError::from)?;

    Ok(())
}

///
/// Bump the failure count and, if not set yet, timestamp the failure date.
///
#[tracing::instrument(name="db:increase_failure_count", skip(ctx, password), fields(password_id=?password.password_id))]
pub async fn increase_failure_count(ctx: &ServiceContext, password: &Password)
    -> Result<(), VaultError> {

    let filter = doc!{ PASSWORD_ID: &password.password_id };

    // Update the failure count and potentially the first_failure timestamp.
    let update = match password.first_failure {
        Some(_) => doc!("$inc": { FAILURE_COUNT: 1 }),
        None => doc!{
            "$inc": { FAILURE_COUNT: 1 },
            "$set": { FIRST_FAILURE: bson::DateTime::from_chrono(ctx.now()) }
        },
    };

    ctx.db().collection::<Document>(PASSWORDS).update_one(filter, update, None)
        .await
        .map_err(VaultError::from)?;

    Ok(())
}

///
/// Clear any failure details and timestamp a successful validate operation.
///
#[tracing::instrument(name="db:record_success", skip(ctx, password), fields(password_id=?password.password_id))]
pub async fn record_success(ctx: &ServiceContext, password: &Password)
    -> Result<(), VaultError> {

    let filter = doc!{ PASSWORD_ID: &password.password_id };

    // Update the failure count and potentially the first_failure timestamp.
    let update = doc!{
        "$unset": { FAILURE_COUNT: "", FIRST_FAILURE: "" , RESET_CODE: "", RESET_STARTED_AT: "" },
        "$set": { LAST_SUCCESS: bson::DateTime::from_chrono(ctx.now()) }
    };

    ctx.db().collection::<Document>(PASSWORDS).update_one(filter, update, None)
        .await
        .map_err(VaultError::from)?;

    Ok(())
}
29.256831
136
0.60777
26b06b355739ca7b7b9523ce4b518dd0562768ff
2,320
use itertools::Itertools;
// NOTE: `cached` is no longer used by part 2 (see solve_part2); the imports are
// kept so the module-level dependency surface is unchanged.
use cached::proc_macro::cached;
use cached::stores::UnboundCache;

/// Parses one adapter joltage per line (whitespace-trimmed).
#[aoc_generator(day10)]
pub fn parse_input(input: &str) -> Vec<usize> {
    input.lines().map(|l| l.trim().parse().unwrap()).collect()
}

/// Part 1: chain every adapter in sorted order and multiply the number of
/// 1-jolt gaps by the number of 3-jolt gaps. `diff3` starts at 1 because the
/// device is always rated 3 jolts above the highest adapter.
#[aoc(day10, part1)]
pub fn solve_part1(input: &Vec<usize>) -> usize {
    let (_, diff1, diff3) = input
        .iter()
        .sorted()
        .fold((0, 0, 1), |(last, diff1, diff3), next| match next - last {
            1 => (*next, diff1 + 1, diff3),
            2 => (*next, diff1, diff3),
            3 => (*next, diff1, diff3 + 1),
            _ => panic!("Unknown diff {} {} {}", next, last, next - last),
        });
    diff1 * diff3
}

/// Part 2: count every distinct adapter arrangement connecting the 0-jolt
/// outlet to the device (rated 3 jolts above the highest adapter).
///
/// This replaces the previous memoized DFS (`find_combo`), which had two
/// defects: it cloned the remaining-socket `Vec` on every recursive call, and
/// its `#[cached]` key was only `(goal, current)` while the result also
/// depended on the socket set — so its correctness silently relied on the
/// depth-first visit order. The left-to-right dynamic program below is
/// O(n log n) for the sort plus O(n) for the scan, and is order-independent.
#[aoc(day10, part2)]
pub fn solve_part2(input: &Vec<usize>) -> usize {
    let sockets: Vec<usize> = input.iter().sorted().cloned().collect();

    // ways[i] = number of valid chains from the outlet (joltage 0) to sockets[i].
    let mut ways: Vec<usize> = vec![0; sockets.len()];
    for i in 0..sockets.len() {
        // The outlet can feed any adapter rated at most 3 jolts.
        let mut total = if sockets[i] <= 3 { 1 } else { 0 };
        // Any earlier adapter within 3 jolts can directly precede sockets[i];
        // sockets is sorted, so stop at the first gap larger than 3.
        for j in (0..i).rev() {
            if sockets[i] - sockets[j] > 3 {
                break;
            }
            total += ways[j];
        }
        ways[i] = total;
    }

    // The device sits exactly 3 jolts above the highest adapter, so every chain
    // ending at the last (maximum) adapter is a complete arrangement.
    *ways.last().expect("at least one adapter is required")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn part2_a() {
        let input = "16
        10
        15
        5
        1
        11
        7
        19
        6
        12
        4";
        let input = parse_input(input);
        assert_eq!(8, solve_part2(&input));
    }

    #[test]
    fn part2_b() {
        let input = "28
        33
        18
        42
        31
        14
        46
        20
        48
        47
        24
        23
        49
        45
        19
        38
        39
        11
        1
        32
        25
        35
        8
        17
        7
        9
        4
        2
        34
        10
        3";
        let input = parse_input(input);
        assert_eq!(19208, solve_part2(&input));
    }
}
19.82906
74
0.465948
8fc81e9e9334ee44ff43326aa3a9e42a3cfcb98e
16,980
use crate::join::graph::Edge; use crate::join::{Join, JoinGraph, QualifiedJoin}; use crate::lgc::LgcPlan; use crate::op::{Aggr, Apply, Op, OpVisitor, SortItem}; use crate::query::QuerySet; use crate::setop::Setop; use std::fmt::{self, Write}; use xngin_expr::controlflow::{Branch, ControlFlow, Unbranch}; use xngin_expr::{AggKind, Col, Const, Expr, ExprKind, Farg, Pred, QueryID, Setq}; const INDENT: usize = 4; const BRANCH_1: char = '└'; const BRANCH_N: char = '├'; const BRANCH_V: char = '│'; const LINE: char = '─'; /// Explain defines how to explain an expression, an operator /// or a plan. pub trait Explain { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result; } impl Explain for LgcPlan { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { for attach in &self.attaches { match self.qry_set.get(attach) { Some(subq) => { let mut qe = QueryExplain { title: Some(format!("(aq{})", **attach)), queries: &self.qry_set, f, spans: vec![], }; subq.root.walk(&mut qe).unbranch()? } None => f.write_str("No attached plan found")?, } } match self.qry_set.get(&self.root) { Some(subq) => { let mut qe = QueryExplain { title: Some("(root) ".to_string()), queries: &self.qry_set, f, spans: vec![], }; subq.root.walk(&mut qe).unbranch() } None => f.write_str("No plan found"), } } } /* Implements Explain for all operators */ impl Explain for Op { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { match self { Op::Proj { cols, .. } => { f.write_str("Proj{")?; write_refs(f, cols.iter().map(|(e, _)| e), ", ")?; f.write_str("}") } Op::Filt { pred, .. } => { f.write_str("Filt{")?; if !pred.is_empty() { write_refs(f, pred, " and ")?; } f.write_char('}') } Op::Aggr(aggr) => aggr.explain(f), Op::Sort { items, .. } => { f.write_str("Sort{")?; write_refs(f, items, ", ")?; f.write_char('}') } Op::Join(join) => join.explain(f), Op::JoinGraph(graph) => graph.explain(f), Op::Setop(setop) => setop.explain(f), Op::Limit { start, end, .. 
} => { write!(f, "Limit{{{}, {}}}", start, end) } Op::Attach(_, qry_id) => { f.write_str("Attach{")?; qry_id.explain(f)?; f.write_char('}') } Op::Row(row) => { f.write_str("Row{")?; write_refs(f, row.iter().map(|(e, _)| e), ", ")?; f.write_char('}') } Op::Table(_, table_id) => { write!(f, "Table{{{}}}", table_id.value()) } Op::Query(_) => f.write_str("(subquery todo)"), Op::Empty => f.write_str("Empty"), } } } impl Explain for SortItem { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { self.expr.explain(f)?; if self.desc { f.write_str(" desc")? } Ok(()) } } impl Explain for Aggr { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str("Aggr{")?; f.write_str("proj=[")?; write_refs(f, self.proj.iter().map(|(e, _)| e), ", ")?; f.write_str("]}") } } impl Explain for Join { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str("Join{")?; match self { Join::Cross(_) => f.write_str("cross")?, Join::Qualified(QualifiedJoin { kind, cond, filt, .. }) => { f.write_str(kind.to_lower())?; if !cond.is_empty() { f.write_str(", cond=[")?; write_refs(f, cond, " and ")?; f.write_char(']')? } if !filt.is_empty() { f.write_str(", filt=[")?; write_refs(f, filt, " and ")?; f.write_char(']')? } } } f.write_char('}') } } impl Explain for JoinGraph { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str("JoinGraph{vs=[")?; write_refs(f, &self.queries(), ", ")?; f.write_str("]")?; if self.n_edges() > 0 { f.write_str(", es=[{")?; write_objs( f, self.eids().map(|eid| GraphEdge { g: self, e: self.edge(eid), }), "}, {", )?; f.write_str("}]")? 
} f.write_char('}') } } impl Explain for QueryID { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { write!(f, "q{}", **self) } } struct GraphEdge<'a> { g: &'a JoinGraph, e: &'a Edge, } impl<'a> Explain for GraphEdge<'a> { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str(self.e.kind.to_lower())?; write!( f, ", ls={}, rs={}, es={}, cond=[", self.e.l_vset.len(), self.e.r_vset.len(), self.e.e_vset.len() )?; write_refs(f, self.g.preds(self.e.cond.clone()), " and ")?; if !self.e.filt.is_empty() { f.write_str("], filt=[")?; write_refs(f, self.g.preds(self.e.filt.clone()), " and ")?; } f.write_char(']') } } impl Explain for Apply { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str("Apply{[")?; write_refs(f, &self.vars, ", ")?; f.write_str("]}") } } impl Explain for Setop { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { f.write_str("Setop{")?; f.write_str(self.kind.to_lower())?; if self.q == Setq::All { f.write_str(" all")? } f.write_char('}') } } /* Implements Explain for all expressions */ impl Explain for Expr { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { match &self.kind { ExprKind::Const(c) => c.explain(f), ExprKind::Col(c) => c.explain(f), ExprKind::Aggf { kind, q, arg } => { match kind { AggKind::Count => f.write_str("count(")?, AggKind::Sum => f.write_str("sum(")?, AggKind::Max => f.write_str("max(")?, AggKind::Min => f.write_str("min(")?, AggKind::Avg => f.write_str("avg(")?, } if *q == Setq::Distinct { f.write_str("distinct ")? } arg.explain(f)?; f.write_char(')') } ExprKind::Func { kind, args, .. } => { f.write_str(kind.to_lower())?; f.write_char('(')?; if args.is_empty() { return f.write_char(')'); } write_refs(f, args.as_ref(), ", ")?; f.write_char(')') } ExprKind::Case { op, acts, fallback } => { f.write_str("case ")?; if op.kind != ExprKind::Farg(Farg::None) { op.explain(f)?; f.write_char(' ')? 
} for branch in acts.as_ref().chunks_exact(2) { f.write_str("when ")?; branch[0].explain(f)?; f.write_str(" then ")?; branch[1].explain(f)?; f.write_char(' ')? } if fallback.kind != ExprKind::Farg(Farg::None) { f.write_str("else ")?; fallback.explain(f)?; f.write_char(' ')? } f.write_str("end") } ExprKind::Cast { arg, ty, .. } => { f.write_str("cast(")?; arg.explain(f)?; f.write_str(" as ")?; f.write_str(ty.to_lower().as_ref())?; f.write_char(')') } ExprKind::Pred(p) => p.explain(f), ExprKind::Tuple(es) => { f.write_char('(')?; write_refs(f, es, ", ")?; f.write_char(')') } ExprKind::Subq(_, qry_id) => { write!(f, "subq({})", **qry_id) } ExprKind::Attval(qry_id) => { write!(f, "attval({})", **qry_id) } ExprKind::Plhd(_) => write!(f, "(placeholder todo)"), ExprKind::Farg(_) => write!(f, "(funcarg todo)"), } } } impl Explain for Const { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { match self { Const::I64(v) => write!(f, "{}", v), Const::U64(v) => write!(f, "{}", v), Const::F64(v) => write!(f, "{}", v.value()), Const::Decimal(v) => write!(f, "{}", v.to_string(-1)), Const::Date(v) => write!(f, "date'{}'", v), Const::Time(v) => write!(f, "time'{}'", v), Const::Datetime(v) => write!(f, "timestamp'{}'", v), Const::Interval(v) => write!(f, "interval'{}'{}", v.value, v.unit.to_lower()), Const::String(s) => write!(f, "'{}'", s), Const::Bytes(_) => write!(f, "(bytes todo)"), Const::Bool(b) => write!(f, "{}", b), Const::Null => f.write_str("null"), } } } impl Explain for Col { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { match self { Col::TableCol(table_id, idx) => write!(f, "t{}.{}", table_id.value(), idx), Col::QueryCol(query_id, idx) => write!(f, "q{}.{}", **query_id, idx), Col::CorrelatedCol(query_id, idx) => write!(f, "cq{}.{}", **query_id, idx), } } } impl Explain for Pred { fn explain<F: Write>(&self, f: &mut F) -> fmt::Result { match self { Pred::Conj(es) => write_refs(f, es, " and "), Pred::Disj(es) => write_refs(f, es, " or "), Pred::Xor(es) => 
write_refs(f, es, " xor "), Pred::Func { kind, args } => { f.write_str(kind.to_lower())?; f.write_char('(')?; write_refs(f, args.as_ref(), ", ")?; f.write_char(')') } Pred::Not(e) => { f.write_str("not ")?; e.explain(f) } Pred::InSubquery(lhs, subq) => { lhs.explain(f)?; f.write_str(" in ")?; subq.explain(f) } Pred::NotInSubquery(lhs, subq) => { lhs.explain(f)?; f.write_str(" not in ")?; subq.explain(f) } Pred::Exists(subq) => { f.write_str("exists ")?; subq.explain(f) } Pred::NotExists(subq) => { f.write_str("not exists ")?; subq.explain(f) } } } } fn write_refs<'i, F, E: 'i, I>(f: &mut F, exprs: I, delimiter: &str) -> fmt::Result where F: Write, E: Explain, I: IntoIterator<Item = &'i E>, { let mut exprs = exprs.into_iter(); if let Some(head) = exprs.next() { head.explain(f)? } for e in exprs { f.write_str(delimiter)?; e.explain(f)? } Ok(()) } fn write_objs<'i, F, E: 'i, I>(f: &mut F, exprs: I, delimiter: &str) -> fmt::Result where F: Write, E: Explain, I: IntoIterator<Item = E>, { let mut exprs = exprs.into_iter(); if let Some(head) = exprs.next() { head.explain(f)? } for e in exprs { f.write_str(delimiter)?; e.explain(f)? 
} Ok(()) } #[derive(Debug, Clone, Copy)] enum Span { Space(u16), Branch(u16, bool), } struct QueryExplain<'a, F> { title: Option<String>, queries: &'a QuerySet, f: &'a mut F, spans: Vec<Span>, // res: fmt::Result, } impl<'a, F: Write> QueryExplain<'a, F> { // returns true if continue fn write_prefix(&mut self) -> fmt::Result { write_prefix(self.f, &self.spans)?; // only write title once if let Some(s) = self.title.take() { self.write_str(&s)?; } Ok(()) } fn write_str(&mut self, s: &str) -> fmt::Result { self.f.write_str(s) } } impl<F: Write> OpVisitor for QueryExplain<'_, F> { type Cont = (); type Break = fmt::Error; #[inline] fn enter(&mut self, op: &Op) -> ControlFlow<fmt::Error> { // special handling Subquery if let Op::Query(query_id) = op { return if let Some(subq) = self.queries.get(query_id) { let mut qe = QueryExplain { title: Some(format!("(q{}) ", **query_id)), queries: self.queries, f: self.f, spans: self.spans.clone(), }; subq.root.walk(&mut qe) } else { ControlFlow::Break(fmt::Error) }; } let child_cnt = op.inputs().len(); self.write_prefix().branch()?; // process at parent level if let Some(span) = self.spans.pop() { match span { Span::Branch(1, _) => { if let Some(Span::Space(n)) = self.spans.last_mut() { *n += INDENT as u16 } else { self.spans.push(Span::Space(INDENT as u16)) } } Span::Branch(n, _) => self.spans.push(Span::Branch(n - 1, true)), _ => self.spans.push(span), } } // process at current level if child_cnt > 0 { self.spans.push(Span::Branch(child_cnt as u16, false)) } op.explain(self.f).branch()?; self.write_str("\n").branch() } #[inline] fn leave(&mut self, _op: &Op) -> ControlFlow<fmt::Error> { if let Some(span) = self.spans.last_mut() { match span { Span::Branch(1, _) => { let _ = self.spans.pop(); } Span::Branch(n, vertical) => { *n -= 1; *vertical = false; } _ => (), } } ControlFlow::Continue(()) } } fn write_prefix<F: Write>(f: &mut F, spans: &[Span]) -> fmt::Result { for &span in spans { match span { Span::Space(n) => { for _ in 
0..n { f.write_char(' ')? } } Span::Branch(1, false) => { f.write_char(BRANCH_1)?; for _ in 1..INDENT { f.write_char(LINE)? } } Span::Branch(_, false) => { f.write_char(BRANCH_N)?; for _ in 1..INDENT { f.write_char(LINE)? } } Span::Branch(_, true) => { f.write_char(BRANCH_V)?; for _ in 1..INDENT { f.write_char(' ')? } } } } Ok(()) } #[cfg(test)] mod tests { use super::Explain; use crate::builder::tests::tpch_catalog; use crate::builder::PlanBuilder; use xngin_frontend::parser::dialect::MySQL; use xngin_frontend::parser::parse_query; #[test] fn test_explain_plan() { let cat = tpch_catalog(); for sql in vec![ "select 1, true, 1.0e2, 1 & 2, 1 | 2, 1 ^ 2, 1 << 2, 1 >> 2, 1 and 2, 1 or 2, 1 xor 2", "with cte1 as (select 1), cte2 as (select 2) select * from cte1", "select l1.l_orderkey from lineitem l1, lineitem l2", "select l_orderkey from lineitem union all select l_orderkey from lineitem", "select 1 union select 2", "select 1 from lineitem join (select 1) t1", ] { let builder = PlanBuilder::new(&cat, "tpch").unwrap(); let qr = parse_query(MySQL(sql)).unwrap(); let plan = builder.build_plan(&qr).unwrap(); let mut s = String::new(); let _ = plan.explain(&mut s).unwrap(); println!("Explain plan:\n{}", s) } } }
31.328413
99
0.426678
08b9aeabe353f0cb53fab8fd3278a52d05505709
30,052
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // Copyright © 2019 Intel Corporation // // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause // use std::os::unix::thread::JoinHandleExt; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Barrier, Mutex, RwLock, Weak}; use std::thread; use std::{fmt, io, result}; use libc::{c_void, siginfo_t}; use crate::device_manager::DeviceManager; #[cfg(feature = "acpi")] use acpi_tables::{aml, aml::Aml, sdt::SDT}; use arch::layout; use devices::{ioapic, BusDevice}; use kvm_bindings::CpuId; use kvm_ioctls::*; use vm_memory::{Address, GuestAddress, GuestMemoryMmap}; use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::signal::{register_signal_handler, validate_signal_num}; const VCPU_RTSIG_OFFSET: i32 = 0; // Debug I/O port #[cfg(target_arch = "x86_64")] const DEBUG_IOPORT: u16 = 0x80; const DEBUG_IOPORT_PREFIX: &str = "Debug I/O port"; /// Debug I/O port, see: /// https://www.intel.com/content/www/us/en/support/articles/000005500/boards-and-kits.html /// /// Since we're not a physical platform, we can freely assign code ranges for /// debugging specific parts of our virtual platform. 
pub enum DebugIoPortRange { Firmware, Bootloader, Kernel, Userspace, Custom, } impl DebugIoPortRange { fn from_u8(value: u8) -> DebugIoPortRange { match value { 0x00..=0x1f => DebugIoPortRange::Firmware, 0x20..=0x3f => DebugIoPortRange::Bootloader, 0x40..=0x5f => DebugIoPortRange::Kernel, 0x60..=0x7f => DebugIoPortRange::Userspace, _ => DebugIoPortRange::Custom, } } } impl fmt::Display for DebugIoPortRange { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { DebugIoPortRange::Firmware => write!(f, "{}: Firmware", DEBUG_IOPORT_PREFIX), DebugIoPortRange::Bootloader => write!(f, "{}: Bootloader", DEBUG_IOPORT_PREFIX), DebugIoPortRange::Kernel => write!(f, "{}: Kernel", DEBUG_IOPORT_PREFIX), DebugIoPortRange::Userspace => write!(f, "{}: Userspace", DEBUG_IOPORT_PREFIX), DebugIoPortRange::Custom => write!(f, "{}: Custom", DEBUG_IOPORT_PREFIX), } } } #[derive(Debug)] pub enum Error { /// Cannot open the VCPU file descriptor. VcpuFd(kvm_ioctls::Error), /// Cannot run the VCPUs. VcpuRun(kvm_ioctls::Error), /// Cannot spawn a new vCPU thread. VcpuSpawn(io::Error), #[cfg(target_arch = "x86_64")] /// Error configuring the general purpose registers REGSConfiguration(arch::x86_64::regs::Error), #[cfg(target_arch = "x86_64")] /// Error configuring the special registers SREGSConfiguration(arch::x86_64::regs::Error), #[cfg(target_arch = "x86_64")] /// Error configuring the floating point related registers FPUConfiguration(arch::x86_64::regs::Error), /// The call to KVM_SET_CPUID2 failed. SetSupportedCpusFailed(kvm_ioctls::Error), #[cfg(target_arch = "x86_64")] /// Cannot set the local interruption due to bad configuration. LocalIntConfiguration(arch::x86_64::interrupts::Error), #[cfg(target_arch = "x86_64")] /// Error configuring the MSR registers MSRSConfiguration(arch::x86_64::regs::Error), /// Unexpected KVM_RUN exit reason VcpuUnhandledKvmExit, /// Failed to join on vCPU threads ThreadCleanup, /// Cannot add legacy device to Bus. 
BusError(devices::BusError), /// Failed to allocate IO port AllocateIOPort, /// Asking for more vCPUs that we can have DesiredVCPUCountExceedsMax, } pub type Result<T> = result::Result<T, Error>; #[allow(dead_code)] #[derive(Copy, Clone)] enum CpuidReg { EAX, EBX, ECX, EDX, } pub struct CpuidPatch { pub function: u32, pub index: u32, pub flags_bit: Option<u8>, pub eax_bit: Option<u8>, pub ebx_bit: Option<u8>, pub ecx_bit: Option<u8>, pub edx_bit: Option<u8>, } impl CpuidPatch { fn set_cpuid_reg( cpuid: &mut CpuId, function: u32, index: Option<u32>, reg: CpuidReg, value: u32, ) { let entries = cpuid.as_mut_slice(); for entry in entries.iter_mut() { if entry.function == function && (index == None || index.unwrap() == entry.index) { match reg { CpuidReg::EAX => { entry.eax = value; } CpuidReg::EBX => { entry.ebx = value; } CpuidReg::ECX => { entry.ecx = value; } CpuidReg::EDX => { entry.edx = value; } } } } } pub fn patch_cpuid(cpuid: &mut CpuId, patches: Vec<CpuidPatch>) { let entries = cpuid.as_mut_slice(); for entry in entries.iter_mut() { for patch in patches.iter() { if entry.function == patch.function && entry.index == patch.index { if let Some(flags_bit) = patch.flags_bit { entry.flags |= 1 << flags_bit; } if let Some(eax_bit) = patch.eax_bit { entry.eax |= 1 << eax_bit; } if let Some(ebx_bit) = patch.ebx_bit { entry.ebx |= 1 << ebx_bit; } if let Some(ecx_bit) = patch.ecx_bit { entry.ecx |= 1 << ecx_bit; } if let Some(edx_bit) = patch.edx_bit { entry.edx |= 1 << edx_bit; } } } } } } #[repr(packed)] struct LocalAPIC { pub r#type: u8, pub length: u8, pub processor_id: u8, pub apic_id: u8, pub flags: u32, } #[repr(packed)] #[derive(Default)] struct IOAPIC { pub r#type: u8, pub length: u8, pub ioapic_id: u8, _reserved: u8, pub apic_address: u32, pub gsi_base: u32, } #[repr(packed)] #[derive(Default)] struct InterruptSourceOverride { pub r#type: u8, pub length: u8, pub bus: u8, pub source: u8, pub gsi: u32, pub flags: u16, } /// A wrapper around creating and using a 
kvm-based VCPU. pub struct Vcpu { fd: VcpuFd, id: u8, io_bus: Arc<devices::Bus>, mmio_bus: Arc<devices::Bus>, ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>, vm_ts: std::time::Instant, } impl Vcpu { /// Constructs a new VCPU for `vm`. /// /// # Arguments /// /// * `id` - Represents the CPU number between [0, max vcpus). /// * `vm` - The virtual machine this vcpu will get attached to. pub fn new( id: u8, fd: &Arc<VmFd>, io_bus: Arc<devices::Bus>, mmio_bus: Arc<devices::Bus>, ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>, creation_ts: std::time::Instant, ) -> Result<Self> { let kvm_vcpu = fd.create_vcpu(id).map_err(Error::VcpuFd)?; // Initially the cpuid per vCPU is the one supported by this VM. Ok(Vcpu { fd: kvm_vcpu, id, io_bus, mmio_bus, ioapic, vm_ts: creation_ts, }) } /// Configures a x86_64 specific vcpu and should be called once per vcpu from the vcpu's thread. /// /// # Arguments /// /// * `machine_config` - Specifies necessary info used for the CPUID configuration. /// * `kernel_start_addr` - Offset from `guest_mem` at which the kernel starts. /// * `vm` - The virtual machine this vcpu will get attached to. 
pub fn configure( &mut self, kernel_start_addr: Option<GuestAddress>, vm_memory: &Arc<RwLock<GuestMemoryMmap>>, cpuid: CpuId, ) -> Result<()> { let mut cpuid = cpuid; CpuidPatch::set_cpuid_reg(&mut cpuid, 0xb, None, CpuidReg::EDX, u32::from(self.id)); self.fd .set_cpuid2(&cpuid) .map_err(Error::SetSupportedCpusFailed)?; arch::x86_64::regs::setup_msrs(&self.fd).map_err(Error::MSRSConfiguration)?; if let Some(kernel_start_addr) = kernel_start_addr { // Safe to unwrap because this method is called after the VM is configured arch::x86_64::regs::setup_regs( &self.fd, kernel_start_addr.raw_value(), arch::x86_64::layout::BOOT_STACK_POINTER.raw_value(), arch::x86_64::layout::ZERO_PAGE_START.raw_value(), ) .map_err(Error::REGSConfiguration)?; arch::x86_64::regs::setup_fpu(&self.fd).map_err(Error::FPUConfiguration)?; arch::x86_64::regs::setup_sregs(&vm_memory.read().unwrap(), &self.fd) .map_err(Error::SREGSConfiguration)?; } arch::x86_64::interrupts::set_lint(&self.fd).map_err(Error::LocalIntConfiguration)?; Ok(()) } /// Runs the VCPU until it exits, returning the reason. /// /// Note that the state of the VCPU and associated VM must be setup first for this to do /// anything useful. 
pub fn run(&self) -> Result<bool> { match self.fd.run() { Ok(run) => match run { VcpuExit::IoIn(addr, data) => { self.io_bus.read(u64::from(addr), data); Ok(true) } VcpuExit::IoOut(addr, data) => { if addr == DEBUG_IOPORT && data.len() == 1 { self.log_debug_ioport(data[0]); } self.io_bus.write(u64::from(addr), data); Ok(true) } VcpuExit::MmioRead(addr, data) => { self.mmio_bus.read(addr as u64, data); Ok(true) } VcpuExit::MmioWrite(addr, data) => { self.mmio_bus.write(addr as u64, data); Ok(true) } VcpuExit::IoapicEoi(vector) => { if let Some(ioapic) = &self.ioapic { ioapic.lock().unwrap().end_of_interrupt(vector); } Ok(true) } VcpuExit::Shutdown => { // Triple fault to trigger a reboot Ok(false) } r => { error!("Unexpected exit reason on vcpu run: {:?}", r); Err(Error::VcpuUnhandledKvmExit) } }, Err(ref e) => match e.errno() { libc::EAGAIN | libc::EINTR => Ok(true), _ => { error!("VCPU {:?} error {:?}", self.id, e); Err(Error::VcpuUnhandledKvmExit) } }, } } // Log debug io port codes. fn log_debug_ioport(&self, code: u8) { let ts = self.vm_ts.elapsed(); debug!( "[{} code 0x{:x}] {}.{:>06} seconds", DebugIoPortRange::from_u8(code), code, ts.as_secs(), ts.as_micros() ); } } pub struct CpuManager { boot_vcpus: u8, max_vcpus: u8, io_bus: Weak<devices::Bus>, mmio_bus: Arc<devices::Bus>, ioapic: Option<Arc<Mutex<ioapic::Ioapic>>>, vm_memory: Arc<RwLock<GuestMemoryMmap>>, cpuid: CpuId, fd: Arc<VmFd>, vcpus_kill_signalled: Arc<AtomicBool>, vcpus_pause_signalled: Arc<AtomicBool>, reset_evt: EventFd, vcpu_states: Vec<VcpuState>, selected_cpu: u8, } const CPU_ENABLE_FLAG: usize = 0; const CPU_STATUS_OFFSET: u64 = 4; const CPU_SELECTION_OFFSET: u64 = 0; impl BusDevice for CpuManager { fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) { match offset { CPU_STATUS_OFFSET => { if self.selected_cpu < self.present_vcpus() { let state = &self.vcpu_states[usize::from(self.selected_cpu)]; if state.active() { data[0] |= 1 << CPU_ENABLE_FLAG; } } } _ => { warn!( "Unexpected 
offset for accessing CPU manager device: {:#}", offset ); } } } fn write(&mut self, _base: u64, offset: u64, data: &[u8]) { match offset { CPU_SELECTION_OFFSET => { self.selected_cpu = data[0]; } _ => { warn!( "Unexpected offset for accessing CPU manager device: {:#}", offset ); } } } } struct VcpuState { handle: Option<thread::JoinHandle<()>>, } impl VcpuState { fn active(&self) -> bool { self.handle.is_some() } fn signal_thread(&self) { if let Some(handle) = self.handle.as_ref() { let signum = validate_signal_num(VCPU_RTSIG_OFFSET, true).unwrap(); unsafe { libc::pthread_kill(handle.as_pthread_t(), signum); } } } fn join_thread(&mut self) -> Result<()> { if let Some(handle) = self.handle.take() { handle.join().map_err(|_| Error::ThreadCleanup)? } Ok(()) } fn unpark_thread(&self) { if let Some(handle) = self.handle.as_ref() { handle.thread().unpark() } } } impl CpuManager { pub fn new( boot_vcpus: u8, max_vcpus: u8, device_manager: &DeviceManager, guest_memory: Arc<RwLock<GuestMemoryMmap>>, fd: Arc<VmFd>, cpuid: CpuId, reset_evt: EventFd, ) -> Result<Arc<Mutex<CpuManager>>> { let cpu_manager = Arc::new(Mutex::new(CpuManager { boot_vcpus, max_vcpus, io_bus: Arc::downgrade(&device_manager.io_bus().clone()), mmio_bus: device_manager.mmio_bus().clone(), ioapic: device_manager.ioapic().clone(), vm_memory: guest_memory, cpuid, fd, vcpus_kill_signalled: Arc::new(AtomicBool::new(false)), vcpus_pause_signalled: Arc::new(AtomicBool::new(false)), vcpu_states: Vec::with_capacity(max_vcpus as usize), reset_evt, selected_cpu: 0, })); device_manager .allocator() .lock() .unwrap() .allocate_io_addresses(Some(GuestAddress(0x0cd8)), 0x8, None) .ok_or(Error::AllocateIOPort)?; cpu_manager .lock() .unwrap() .io_bus .upgrade() .unwrap() .insert(cpu_manager.clone(), 0x0cd8, 0xc) .map_err(Error::BusError)?; Ok(cpu_manager) } fn activate_vcpus( &mut self, desired_vcpus: u8, entry_addr: Option<GuestAddress>, ) -> Result<()> { if desired_vcpus > self.max_vcpus { return 
Err(Error::DesiredVCPUCountExceedsMax); } let creation_ts = std::time::Instant::now(); let vcpu_thread_barrier = Arc::new(Barrier::new( (desired_vcpus - self.present_vcpus() + 1) as usize, )); for cpu_id in self.present_vcpus()..desired_vcpus { let ioapic = if let Some(ioapic) = &self.ioapic { Some(ioapic.clone()) } else { None }; let mut vcpu = Vcpu::new( cpu_id, &self.fd, self.io_bus.clone().upgrade().unwrap(), self.mmio_bus.clone(), ioapic, creation_ts, )?; let vcpu_thread_barrier = vcpu_thread_barrier.clone(); let reset_evt = self.reset_evt.try_clone().unwrap(); let vcpu_kill_signalled = self.vcpus_kill_signalled.clone(); let vcpu_pause_signalled = self.vcpus_pause_signalled.clone(); let vm_memory = self.vm_memory.clone(); let cpuid = self.cpuid.clone(); let handle = Some( thread::Builder::new() .name(format!("vcpu{}", vcpu.id)) .spawn(move || { unsafe { extern "C" fn handle_signal(_: i32, _: *mut siginfo_t, _: *mut c_void) { } // This uses an async signal safe handler to kill the vcpu handles. register_signal_handler( VCPU_RTSIG_OFFSET, vmm_sys_util::signal::SignalHandler::Siginfo(handle_signal), true, 0, ) .expect("Failed to register vcpu signal handler"); } vcpu.configure(entry_addr, &vm_memory, cpuid) .expect("Failed to configure vCPU"); // Block until all CPUs are ready. vcpu_thread_barrier.wait(); loop { // vcpu.run() returns false on a KVM_EXIT_SHUTDOWN (triple-fault) so trigger a reset match vcpu.run() { Err(e) => { error!("VCPU generated error: {:?}", e); break; } Ok(true) => {} Ok(false) => { reset_evt.write(1).unwrap(); break; } } // We've been told to terminate if vcpu_kill_signalled.load(Ordering::SeqCst) { break; } // If we are being told to pause, we park the thread // until the pause boolean is toggled. // The resume operation is responsible for toggling // the boolean and unpark the thread. // We enter a loop because park() could spuriously // return. We will then park() again unless the // pause boolean has been toggled. 
while vcpu_pause_signalled.load(Ordering::SeqCst) { thread::park(); } } }) .map_err(Error::VcpuSpawn)?, ); self.vcpu_states.push(VcpuState { handle }); } // Unblock all CPU threads. vcpu_thread_barrier.wait(); Ok(()) } // Starts all the vCPUs that the VM is booting with. Blocks until all vCPUs are running. pub fn start_boot_vcpus(&mut self, entry_addr: GuestAddress) -> Result<()> { self.activate_vcpus(self.boot_vcpus(), Some(entry_addr)) } pub fn resize(&mut self, desired_vcpus: u8) -> Result<()> { self.activate_vcpus(desired_vcpus, None) } pub fn shutdown(&mut self) -> Result<()> { // Tell the vCPUs to stop themselves next time they go through the loop self.vcpus_kill_signalled.store(true, Ordering::SeqCst); // Signal to the spawned threads (vCPUs and console signal handler). For the vCPU threads // this will interrupt the KVM_RUN ioctl() allowing the loop to check the boolean set // above. for state in self.vcpu_states.iter() { state.signal_thread(); } // Wait for all the threads to finish. This removes the state from the vector. for mut state in self.vcpu_states.drain(..) { state.join_thread()?; } Ok(()) } pub fn pause(&self) -> Result<()> { // Tell the vCPUs to pause themselves next time they exit self.vcpus_pause_signalled.store(true, Ordering::SeqCst); // Signal to the spawned threads (vCPUs and console signal handler). For the vCPU threads // this will interrupt the KVM_RUN ioctl() allowing the loop to check the boolean set // above. for state in self.vcpu_states.iter() { state.signal_thread(); } Ok(()) } pub fn resume(&self) -> Result<()> { // Toggle the vCPUs pause boolean self.vcpus_pause_signalled.store(false, Ordering::SeqCst); // Unpark all the VCPU threads. // Once unparked, the next thing they will do is checking for the pause // boolean. Since it'll be set to false, they will exit their pause loop // and go back to vmx root. 
for state in self.vcpu_states.iter() { state.unpark_thread(); } Ok(()) } pub fn boot_vcpus(&self) -> u8 { self.boot_vcpus } pub fn max_vcpus(&self) -> u8 { self.max_vcpus } fn present_vcpus(&self) -> u8 { self.vcpu_states.len() as u8 } #[cfg(feature = "acpi")] pub fn create_madt(&self) -> SDT { // This is also checked in the commandline parsing. assert!(self.boot_vcpus <= self.max_vcpus); let mut madt = SDT::new(*b"APIC", 44, 5, *b"CLOUDH", *b"CHMADT ", 1); madt.write(36, layout::APIC_START); for cpu in 0..self.max_vcpus { let lapic = LocalAPIC { r#type: 0, length: 8, processor_id: cpu, apic_id: cpu, flags: if cpu < self.boot_vcpus { 1 << MADT_CPU_ENABLE_FLAG } else { 0 }, }; madt.append(lapic); } madt.append(IOAPIC { r#type: 1, length: 12, ioapic_id: 0, apic_address: layout::IOAPIC_START.0 as u32, gsi_base: 0, ..Default::default() }); madt.append(InterruptSourceOverride { r#type: 2, length: 10, bus: 0, source: 4, gsi: 4, flags: 0, }); madt } } struct CPU { cpu_id: u8, } const MADT_CPU_ENABLE_FLAG: usize = 0; #[cfg(feature = "acpi")] impl Aml for CPU { fn to_aml_bytes(&self) -> Vec<u8> { let lapic = LocalAPIC { r#type: 0, length: 8, processor_id: self.cpu_id, apic_id: self.cpu_id, flags: 1 << MADT_CPU_ENABLE_FLAG, }; let mut mat_data: Vec<u8> = Vec::new(); mat_data.resize(std::mem::size_of_val(&lapic), 0); unsafe { *(mat_data.as_mut_ptr() as *mut LocalAPIC) = lapic }; aml::Device::new( format!("C{:03}", self.cpu_id).as_str().into(), vec![ &aml::Name::new("_HID".into(), &"ACPI0007"), &aml::Name::new("_UID".into(), &self.cpu_id), /* _STA return value: Bit [0] – Set if the device is present. Bit [1] – Set if the device is enabled and decoding its resources. Bit [2] – Set if the device should be shown in the UI. Bit [3] – Set if the device is functioning properly (cleared if device failed its diagnostics). Bit [4] – Set if the battery is present. Bits [31:5] – Reserved (must be cleared). 
*/ &aml::Method::new( "_STA".into(), 0, false, // Call into CSTA method which will interrogate device vec![&aml::Return::new(&aml::MethodCall::new( "CSTA".into(), vec![&self.cpu_id], ))], ), // The Linux kernel expects every CPU device to have a _MAT entry // containing the LAPIC for this processor with the enabled bit set // even it if is disabled in the MADT (non-boot CPU) &aml::Name::new("_MAT".into(), &aml::Buffer::new(mat_data)), ], ) .to_aml_bytes() } } struct CPUMethods { max_vcpus: u8, } #[cfg(feature = "acpi")] impl Aml for CPUMethods { fn to_aml_bytes(&self) -> Vec<u8> { let mut bytes = Vec::new(); bytes.extend_from_slice( // CPU status method &aml::Method::new( "CSTA".into(), 1, true, vec![ // Take lock defined above &aml::Acquire::new("\\_SB_.PRES.CPLK".into(), 0xfff), // Write CPU number (in first argument) to I/O port via field &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CSEL"), &aml::Arg(0)), &aml::Store::new(&aml::Local(0), &aml::ZERO), // Check if CPEN bit is set, if so make the local variable 0xf (see _STA for details of meaning) &aml::If::new( &aml::Equal::new(&aml::Path::new("\\_SB_.PRES.CPEN"), &aml::ONE), vec![&aml::Store::new(&aml::Local(0), &0xfu8)], ), // Release lock &aml::Release::new("\\_SB_.PRES.CPLK".into()), // Return 0 or 0xf &aml::Return::new(&aml::Local(0)), ], ) .to_aml_bytes(), ); let mut paths = Vec::new(); for cpu_id in 0..self.max_vcpus { paths.push(aml::Path::new(format!("C{:03}", cpu_id).as_str())) } let mut notify_methods = Vec::new(); for cpu_id in 0..self.max_vcpus { notify_methods.push(aml::Notify::new(&paths[usize::from(cpu_id)], &aml::ONE)); } let mut notify_methods_inner: Vec<&dyn aml::Aml> = Vec::new(); for notify_method in notify_methods.iter() { notify_methods_inner.push(notify_method); } bytes.extend_from_slice( // Notify all vCPUs &aml::Method::new("CTFY".into(), 0, true, notify_methods_inner).to_aml_bytes(), ); bytes } } #[cfg(feature = "acpi")] impl Aml for CpuManager { fn to_aml_bytes(&self) -> Vec<u8> { let 
mut bytes = Vec::new(); // CPU hotplug controller bytes.extend_from_slice( &aml::Device::new( "_SB_.PRES".into(), vec![ &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0A06")), // Mutex to protect concurrent access as we write to choose CPU and then read back status &aml::Mutex::new("CPLK".into(), 0), // I/O port for CPU controller &aml::Name::new( "_CRS".into(), &aml::ResourceTemplate::new(vec![&aml::IO::new( 0x0cd8, 0x0cd8, 0x01, 0x0c, )]), ), // OpRegion and Fields map I/O port into individual field values &aml::OpRegion::new("PRST".into(), aml::OpRegionSpace::SystemIO, 0x0cd8, 0x0c), &aml::Field::new( "PRST".into(), aml::FieldAccessType::Byte, aml::FieldUpdateRule::WriteAsZeroes, vec![ aml::FieldEntry::Reserved(32), aml::FieldEntry::Named(*b"CPEN", 1), aml::FieldEntry::Named(*b"CINS", 1), aml::FieldEntry::Named(*b"CRMV", 1), aml::FieldEntry::Named(*b"CEJ0", 1), aml::FieldEntry::Reserved(4), aml::FieldEntry::Named(*b"CCMD", 8), ], ), &aml::Field::new( "PRST".into(), aml::FieldAccessType::DWord, aml::FieldUpdateRule::Preserve, vec![ aml::FieldEntry::Named(*b"CSEL", 32), aml::FieldEntry::Reserved(32), aml::FieldEntry::Named(*b"CDAT", 32), ], ), ], ) .to_aml_bytes(), ); // CPU devices let hid = aml::Name::new("_HID".into(), &"ACPI0010"); let uid = aml::Name::new("_CID".into(), &aml::EISAName::new("PNP0A05")); // Bundle methods together under a common object let methods = CPUMethods { max_vcpus: self.max_vcpus, }; let mut cpu_data_inner: Vec<&dyn aml::Aml> = vec![&hid, &uid, &methods]; let mut cpu_devices = Vec::new(); for cpu_id in 0..self.max_vcpus { let cpu_device = CPU { cpu_id }; cpu_devices.push(cpu_device); } for cpu_device in cpu_devices.iter() { cpu_data_inner.push(cpu_device); } bytes.extend_from_slice( &aml::Device::new("_SB_.CPUS".into(), cpu_data_inner).to_aml_bytes(), ); bytes } }
32.87965
116
0.499834
6a3d694e64080742c2d97c5dbcf89cb1e45af0ad
87,500
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. use crate::bindings; use crate::error::attach_handle_to_error; use crate::error::generic_error; use crate::error::ErrWithV8Handle; use crate::error::JsError; use crate::inspector::JsRuntimeInspector; use crate::module_specifier::ModuleSpecifier; use crate::modules::ModuleId; use crate::modules::ModuleLoadId; use crate::modules::ModuleLoader; use crate::modules::ModuleMap; use crate::modules::NoopModuleLoader; use crate::ops::*; use crate::Extension; use crate::OpMiddlewareFn; use crate::OpPayload; use crate::OpResult; use crate::OpState; use crate::PromiseId; use anyhow::Error; use futures::channel::oneshot; use futures::future::poll_fn; use futures::future::FutureExt; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use futures::task::AtomicWaker; use std::any::Any; use std::cell::RefCell; use std::collections::HashMap; use std::collections::HashSet; use std::ffi::c_void; use std::mem::forget; use std::option::Option; use std::rc::Rc; use std::sync::Arc; use std::sync::Mutex; use std::sync::Once; use std::task::Context; use std::task::Poll; type PendingOpFuture = OpCall<(PromiseId, OpId, OpResult)>; pub enum Snapshot { Static(&'static [u8]), JustCreated(v8::StartupData), Boxed(Box<[u8]>), } pub type JsErrorCreateFn = dyn Fn(JsError) -> Error; pub type GetErrorClassFn = &'static dyn for<'e> Fn(&'e Error) -> &'static str; /// Objects that need to live as long as the isolate #[derive(Default)] struct IsolateAllocations { near_heap_limit_callback_data: Option<(Box<RefCell<dyn Any>>, v8::NearHeapLimitCallback)>, } /// A single execution context of JavaScript. Corresponds roughly to the "Web /// Worker" concept in the DOM. A JsRuntime is a Future that can be used with /// an event loop (Tokio, async_std). //// /// The JsRuntime future completes when there is an error or when all /// pending ops have completed. 
/// /// Pending ops are created in JavaScript by calling Deno.core.opAsync(), and in Rust /// by implementing an async function that takes a serde::Deserialize "control argument" /// and an optional zero copy buffer, each async Op is tied to a Promise in JavaScript. pub struct JsRuntime { // This is an Option<OwnedIsolate> instead of just OwnedIsolate to workaround // a safety issue with SnapshotCreator. See JsRuntime::drop. v8_isolate: Option<v8::OwnedIsolate>, // This is an Option<Box<JsRuntimeInspector> instead of just Box<JsRuntimeInspector> // to workaround a safety issue. See JsRuntime::drop. inspector: Option<Box<JsRuntimeInspector>>, snapshot_creator: Option<v8::SnapshotCreator>, has_snapshotted: bool, allocations: IsolateAllocations, extensions: Vec<Extension>, } struct DynImportModEvaluate { load_id: ModuleLoadId, module_id: ModuleId, promise: v8::Global<v8::Promise>, module: v8::Global<v8::Module>, } struct ModEvaluate { promise: v8::Global<v8::Promise>, sender: oneshot::Sender<Result<(), Error>>, } pub struct CrossIsolateStore<T>(Arc<Mutex<CrossIsolateStoreInner<T>>>); struct CrossIsolateStoreInner<T> { map: HashMap<u32, T>, last_id: u32, } impl<T> CrossIsolateStore<T> { pub(crate) fn insert(&self, value: T) -> u32 { let mut store = self.0.lock().unwrap(); let last_id = store.last_id; store.map.insert(last_id, value); store.last_id += 1; last_id } pub(crate) fn take(&self, id: u32) -> Option<T> { let mut store = self.0.lock().unwrap(); store.map.remove(&id) } } impl<T> Default for CrossIsolateStore<T> { fn default() -> Self { CrossIsolateStore(Arc::new(Mutex::new(CrossIsolateStoreInner { map: Default::default(), last_id: 0, }))) } } impl<T> Clone for CrossIsolateStore<T> { fn clone(&self) -> Self { Self(self.0.clone()) } } pub type SharedArrayBufferStore = CrossIsolateStore<v8::SharedRef<v8::BackingStore>>; pub type CompiledWasmModuleStore = CrossIsolateStore<v8::CompiledWasmModule>; /// Internal state for JsRuntime which is stored in one of 
v8::Isolate's /// embedder slots. pub(crate) struct JsRuntimeState { pub global_context: Option<v8::Global<v8::Context>>, pub(crate) js_recv_cb: Option<v8::Global<v8::Function>>, pub(crate) js_sync_cb: Option<v8::Global<v8::Function>>, pub(crate) js_macrotask_cbs: Vec<v8::Global<v8::Function>>, pub(crate) js_nexttick_cbs: Vec<v8::Global<v8::Function>>, pub(crate) js_promise_reject_cb: Option<v8::Global<v8::Function>>, pub(crate) js_uncaught_exception_cb: Option<v8::Global<v8::Function>>, pub(crate) has_tick_scheduled: bool, pub(crate) js_wasm_streaming_cb: Option<v8::Global<v8::Function>>, pub(crate) pending_promise_exceptions: HashMap<v8::Global<v8::Promise>, v8::Global<v8::Value>>, pending_dyn_mod_evaluate: Vec<DynImportModEvaluate>, pending_mod_evaluate: Option<ModEvaluate>, /// A counter used to delay our dynamic import deadlock detection by one spin /// of the event loop. dyn_module_evaluate_idle_counter: u32, pub(crate) js_error_create_fn: Rc<JsErrorCreateFn>, pub(crate) pending_ops: FuturesUnordered<PendingOpFuture>, pub(crate) unrefed_ops: HashSet<i32>, pub(crate) have_unpolled_ops: bool, pub(crate) op_state: Rc<RefCell<OpState>>, pub(crate) shared_array_buffer_store: Option<SharedArrayBufferStore>, pub(crate) compiled_wasm_module_store: Option<CompiledWasmModuleStore>, waker: AtomicWaker, } impl Drop for JsRuntime { fn drop(&mut self) { // The Isolate object must outlive the Inspector object, but this is // currently not enforced by the type system. self.inspector.take(); if let Some(creator) = self.snapshot_creator.take() { // TODO(ry): in rusty_v8, `SnapShotCreator::get_owned_isolate()` returns // a `struct OwnedIsolate` which is not actually owned, hence the need // here to leak the `OwnedIsolate` in order to avoid a double free and // the segfault that it causes. 
let v8_isolate = self.v8_isolate.take().unwrap(); forget(v8_isolate); // TODO(ry) V8 has a strange assert which prevents a SnapshotCreator from // being deallocated if it hasn't created a snapshot yet. // https://github.com/v8/v8/blob/73212783fbd534fac76cc4b66aac899c13f71fc8/src/api.cc#L603 // If that assert is removed, this if guard could be removed. // WARNING: There may be false positive LSAN errors here. if self.has_snapshotted { drop(creator); } } } } fn v8_init(v8_platform: Option<v8::SharedRef<v8::Platform>>) { // Include 10MB ICU data file. #[repr(C, align(16))] struct IcuData([u8; 10144432]); static ICU_DATA: IcuData = IcuData(*include_bytes!("icudtl.dat")); v8::icu::set_common_data_69(&ICU_DATA.0).unwrap(); let v8_platform = v8_platform .unwrap_or_else(|| v8::new_default_platform(0, false).make_shared()); v8::V8::initialize_platform(v8_platform); v8::V8::initialize(); let flags = concat!( " --experimental-wasm-threads", " --wasm-test-streaming", " --harmony-import-assertions", " --no-validate-asm", ); v8::V8::set_flags_from_string(flags); } #[derive(Default)] pub struct RuntimeOptions { /// Allows a callback to be set whenever a V8 exception is made. This allows /// the caller to wrap the JsError into an error. By default this callback /// is set to `JsError::create()`. pub js_error_create_fn: Option<Rc<JsErrorCreateFn>>, /// Allows to map error type to a string "class" used to represent /// error in JavaScript. pub get_error_class_fn: Option<GetErrorClassFn>, /// Implementation of `ModuleLoader` which will be /// called when V8 requests to load ES modules. /// /// If not provided runtime will error if code being /// executed tries to load modules. pub module_loader: Option<Rc<dyn ModuleLoader>>, /// JsRuntime extensions, not to be confused with ES modules /// these are sets of ops and other JS code to be initialized. pub extensions: Vec<Extension>, /// V8 snapshot that should be loaded on startup. /// /// Currently can't be used with `will_snapshot`. 
pub startup_snapshot: Option<Snapshot>, /// Prepare runtime to take snapshot of loaded code. /// /// Currently can't be used with `startup_snapshot`. pub will_snapshot: bool, /// Isolate creation parameters. pub create_params: Option<v8::CreateParams>, /// V8 platform instance to use. Used when Deno initializes V8 /// (which it only does once), otherwise it's silenty dropped. pub v8_platform: Option<v8::SharedRef<v8::Platform>>, /// The store to use for transferring SharedArrayBuffers between isolates. /// If multiple isolates should have the possibility of sharing /// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If /// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be /// serialized. pub shared_array_buffer_store: Option<SharedArrayBufferStore>, /// The store to use for transferring `WebAssembly.Module` objects between /// isolates. /// If multiple isolates should have the possibility of sharing /// `WebAssembly.Module` objects, they should use the same /// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified, /// `WebAssembly.Module` objects cannot be serialized. pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>, } impl JsRuntime { /// Only constructor, configuration is done through `options`. pub fn new(mut options: RuntimeOptions) -> Self { let v8_platform = options.v8_platform.take(); static DENO_INIT: Once = Once::new(); DENO_INIT.call_once(move || v8_init(v8_platform)); let has_startup_snapshot = options.startup_snapshot.is_some(); let global_context; let (mut isolate, maybe_snapshot_creator) = if options.will_snapshot { // TODO(ry) Support loading snapshots before snapshotting. 
assert!(options.startup_snapshot.is_none()); let mut creator = v8::SnapshotCreator::new(Some(&bindings::EXTERNAL_REFERENCES)); let isolate = unsafe { creator.get_owned_isolate() }; let mut isolate = JsRuntime::setup_isolate(isolate); { let scope = &mut v8::HandleScope::new(&mut isolate); let context = bindings::initialize_context(scope); global_context = v8::Global::new(scope, context); creator.set_default_context(context); } (isolate, Some(creator)) } else { let mut params = options .create_params .take() .unwrap_or_else(v8::Isolate::create_params) .external_references(&**bindings::EXTERNAL_REFERENCES); let snapshot_loaded = if let Some(snapshot) = options.startup_snapshot { params = match snapshot { Snapshot::Static(data) => params.snapshot_blob(data), Snapshot::JustCreated(data) => params.snapshot_blob(data), Snapshot::Boxed(data) => params.snapshot_blob(data), }; true } else { false }; let isolate = v8::Isolate::new(params); let mut isolate = JsRuntime::setup_isolate(isolate); { let scope = &mut v8::HandleScope::new(&mut isolate); let context = if snapshot_loaded { v8::Context::new(scope) } else { // If no snapshot is provided, we initialize the context with empty // main source code and source maps. 
bindings::initialize_context(scope) }; global_context = v8::Global::new(scope, context); } (isolate, None) }; let inspector = JsRuntimeInspector::new(&mut isolate, global_context.clone()); let loader = options .module_loader .unwrap_or_else(|| Rc::new(NoopModuleLoader)); let js_error_create_fn = options .js_error_create_fn .unwrap_or_else(|| Rc::new(JsError::create)); let mut op_state = OpState::new(); if let Some(get_error_class_fn) = options.get_error_class_fn { op_state.get_error_class_fn = get_error_class_fn; } let op_state = Rc::new(RefCell::new(op_state)); isolate.set_slot(Rc::new(RefCell::new(JsRuntimeState { global_context: Some(global_context), pending_promise_exceptions: HashMap::new(), pending_dyn_mod_evaluate: vec![], pending_mod_evaluate: None, dyn_module_evaluate_idle_counter: 0, js_recv_cb: None, js_sync_cb: None, js_macrotask_cbs: vec![], js_nexttick_cbs: vec![], js_promise_reject_cb: None, js_uncaught_exception_cb: None, has_tick_scheduled: false, js_wasm_streaming_cb: None, js_error_create_fn, pending_ops: FuturesUnordered::new(), unrefed_ops: HashSet::new(), shared_array_buffer_store: options.shared_array_buffer_store, compiled_wasm_module_store: options.compiled_wasm_module_store, op_state: op_state.clone(), have_unpolled_ops: false, waker: AtomicWaker::new(), }))); let module_map = ModuleMap::new(loader, op_state); isolate.set_slot(Rc::new(RefCell::new(module_map))); // Add builtins extension options .extensions .insert(0, crate::ops_builtin::init_builtins()); let mut js_runtime = Self { v8_isolate: Some(isolate), inspector: Some(inspector), snapshot_creator: maybe_snapshot_creator, has_snapshotted: false, allocations: IsolateAllocations::default(), extensions: options.extensions, }; // TODO(@AaronO): diff extensions inited in snapshot and those provided // for now we assume that snapshot and extensions always match if !has_startup_snapshot { js_runtime.init_extension_js().unwrap(); } // Init extension ops 
js_runtime.init_extension_ops().unwrap(); // Init callbacks (opresolve & syncOpsCache) js_runtime.init_cbs(); // Sync ops cache js_runtime.sync_ops_cache(); js_runtime } pub fn global_context(&mut self) -> v8::Global<v8::Context> { let state = Self::state(self.v8_isolate()); let state = state.borrow(); state.global_context.clone().unwrap() } pub fn v8_isolate(&mut self) -> &mut v8::OwnedIsolate { self.v8_isolate.as_mut().unwrap() } pub fn inspector(&mut self) -> &mut Box<JsRuntimeInspector> { self.inspector.as_mut().unwrap() } pub fn handle_scope(&mut self) -> v8::HandleScope { let context = self.global_context(); v8::HandleScope::with_context(self.v8_isolate(), context) } fn setup_isolate(mut isolate: v8::OwnedIsolate) -> v8::OwnedIsolate { isolate.set_capture_stack_trace_for_uncaught_exceptions(true, 10); isolate.set_promise_reject_callback(bindings::promise_reject_callback); isolate.set_host_initialize_import_meta_object_callback( bindings::host_initialize_import_meta_object_callback, ); isolate.set_host_import_module_dynamically_callback( bindings::host_import_module_dynamically_callback, ); isolate } pub(crate) fn state(isolate: &v8::Isolate) -> Rc<RefCell<JsRuntimeState>> { let s = isolate.get_slot::<Rc<RefCell<JsRuntimeState>>>().unwrap(); s.clone() } pub(crate) fn module_map(isolate: &v8::Isolate) -> Rc<RefCell<ModuleMap>> { let module_map = isolate.get_slot::<Rc<RefCell<ModuleMap>>>().unwrap(); module_map.clone() } /// Initializes JS of provided Extensions fn init_extension_js(&mut self) -> Result<(), Error> { // Take extensions to avoid double-borrow let mut extensions: Vec<Extension> = std::mem::take(&mut self.extensions); for m in extensions.iter_mut() { let js_files = m.init_js(); for (filename, source) in js_files { let source = source()?; // TODO(@AaronO): use JsRuntime::execute_static() here to move src off heap self.execute_script(filename, &source)?; } } // Restore extensions self.extensions = extensions; Ok(()) } /// Initializes ops of provided 
Extensions fn init_extension_ops(&mut self) -> Result<(), Error> { let op_state = self.op_state(); // Take extensions to avoid double-borrow let mut extensions: Vec<Extension> = std::mem::take(&mut self.extensions); // Middleware let middleware: Vec<Box<OpMiddlewareFn>> = extensions .iter_mut() .filter_map(|e| e.init_middleware()) .collect(); // macroware wraps an opfn in all the middleware let macroware = move |name, opfn| middleware.iter().fold(opfn, |opfn, m| m(name, opfn)); // Register ops for e in extensions.iter_mut() { e.init_state(&mut op_state.borrow_mut())?; // Register each op after middlewaring it let ops = e.init_ops().unwrap_or_default(); for (name, opfn) in ops { self.register_op(name, macroware(name, opfn)); } } // Restore extensions self.extensions = extensions; Ok(()) } /// Grab a Global handle to a function returned by the given expression fn grab_fn( scope: &mut v8::HandleScope, code: &str, ) -> v8::Global<v8::Function> { let code = v8::String::new(scope, code).unwrap(); let script = v8::Script::compile(scope, code, None).unwrap(); let v8_value = script.run(scope).unwrap(); let cb = v8::Local::<v8::Function>::try_from(v8_value).unwrap(); v8::Global::new(scope, cb) } /// Grabs a reference to core.js' opresolve & syncOpsCache() fn init_cbs(&mut self) { let mut scope = self.handle_scope(); let recv_cb = Self::grab_fn(&mut scope, "Deno.core.opresolve"); let sync_cb = Self::grab_fn(&mut scope, "Deno.core.syncOpsCache"); // Put global handles in state let state_rc = JsRuntime::state(&scope); let mut state = state_rc.borrow_mut(); state.js_recv_cb.replace(recv_cb); state.js_sync_cb.replace(sync_cb); } /// Ensures core.js has the latest op-name to op-id mappings pub fn sync_ops_cache(&mut self) { let scope = &mut self.handle_scope(); let state_rc = JsRuntime::state(scope); let js_sync_cb_handle = state_rc.borrow().js_sync_cb.clone().unwrap(); let js_sync_cb = js_sync_cb_handle.open(scope); let this = v8::undefined(scope).into(); js_sync_cb.call(scope, 
this, &[]); } /// Returns the runtime's op state, which can be used to maintain ops /// and access resources between op calls. pub fn op_state(&mut self) -> Rc<RefCell<OpState>> { let state_rc = Self::state(self.v8_isolate()); let state = state_rc.borrow(); state.op_state.clone() } /// Executes traditional JavaScript code (traditional = not ES modules). /// /// The execution takes place on the current global context, so it is possible /// to maintain local JS state and invoke this method multiple times. /// /// `name` can be a filepath or any other string, eg. /// /// - "/some/file/path.js" /// - "<anon>" /// - "[native code]" /// /// The same `name` value can be used for multiple executions. /// /// `Error` can be downcast to a type that exposes additional information /// about the V8 exception. By default this type is `JsError`, however it may /// be a different type if `RuntimeOptions::js_error_create_fn` has been set. pub fn execute_script( &mut self, name: &str, source_code: &str, ) -> Result<v8::Global<v8::Value>, Error> { let scope = &mut self.handle_scope(); let source = v8::String::new(scope, source_code).unwrap(); let name = v8::String::new(scope, name).unwrap(); let origin = bindings::script_origin(scope, name); let tc_scope = &mut v8::TryCatch::new(scope); let script = match v8::Script::compile(tc_scope, source, Some(&origin)) { Some(script) => script, None => { let exception = tc_scope.exception().unwrap(); return exception_to_err_result(tc_scope, exception, false); } }; match script.run(tc_scope) { Some(value) => { let value_handle = v8::Global::new(tc_scope, value); Ok(value_handle) } None => { assert!(tc_scope.has_caught()); let exception = tc_scope.exception().unwrap(); exception_to_err_result(tc_scope, exception, false) } } } /// Takes a snapshot. The isolate should have been created with will_snapshot /// set to true. /// /// `Error` can be downcast to a type that exposes additional information /// about the V8 exception. 
By default this type is `JsError`, however it may /// be a different type if `RuntimeOptions::js_error_create_fn` has been set. pub fn snapshot(&mut self) -> v8::StartupData { assert!(self.snapshot_creator.is_some()); let state = Self::state(self.v8_isolate()); // Note: create_blob() method must not be called from within a HandleScope. // TODO(piscisaureus): The rusty_v8 type system should enforce this. state.borrow_mut().global_context.take(); self.inspector.take(); // Overwrite existing ModuleMap to drop v8::Global handles self .v8_isolate() .set_slot(Rc::new(RefCell::new(ModuleMap::new( Rc::new(NoopModuleLoader), state.borrow().op_state.clone(), )))); // Drop other v8::Global handles before snapshotting std::mem::take(&mut state.borrow_mut().js_recv_cb); std::mem::take(&mut state.borrow_mut().js_sync_cb); let snapshot_creator = self.snapshot_creator.as_mut().unwrap(); let snapshot = snapshot_creator .create_blob(v8::FunctionCodeHandling::Keep) .unwrap(); self.has_snapshotted = true; snapshot } /// Registers an op that can be called from JavaScript. /// /// The _op_ mechanism allows to expose Rust functions to the JS runtime, /// which can be called using the provided `name`. /// /// This function provides byte-level bindings. To pass data via JSON, the /// following functions can be passed as an argument for `op_fn`: /// * [op_sync()](fn.op_sync.html) /// * [op_async()](fn.op_async.html) pub fn register_op<F>(&mut self, name: &str, op_fn: F) -> OpId where F: Fn(Rc<RefCell<OpState>>, OpPayload) -> Op + 'static, { Self::state(self.v8_isolate()) .borrow_mut() .op_state .borrow_mut() .op_table .register_op(name, op_fn) } /// Registers a callback on the isolate when the memory limits are approached. /// Use this to prevent V8 from crashing the process when reaching the limit. /// /// Calls the closure with the current heap limit and the initial heap limit. /// The return value of the closure is set as the new limit. 
pub fn add_near_heap_limit_callback<C>(&mut self, cb: C) where C: FnMut(usize, usize) -> usize + 'static, { let boxed_cb = Box::new(RefCell::new(cb)); let data = boxed_cb.as_ptr() as *mut c_void; let prev = self .allocations .near_heap_limit_callback_data .replace((boxed_cb, near_heap_limit_callback::<C>)); if let Some((_, prev_cb)) = prev { self .v8_isolate() .remove_near_heap_limit_callback(prev_cb, 0); } self .v8_isolate() .add_near_heap_limit_callback(near_heap_limit_callback::<C>, data); } pub fn remove_near_heap_limit_callback(&mut self, heap_limit: usize) { if let Some((_, cb)) = self.allocations.near_heap_limit_callback_data.take() { self .v8_isolate() .remove_near_heap_limit_callback(cb, heap_limit); } } fn pump_v8_message_loop(&mut self) { let scope = &mut self.handle_scope(); while v8::Platform::pump_message_loop( &v8::V8::get_current_platform(), scope, false, // don't block if there are no tasks ) { // do nothing } scope.perform_microtask_checkpoint(); } /// Waits for the given value to resolve while polling the event loop. /// /// This future resolves when either the value is resolved or the event loop runs to /// completion. 
pub async fn resolve_value( &mut self, global: v8::Global<v8::Value>, ) -> Result<v8::Global<v8::Value>, Error> { poll_fn(|cx| { let state = self.poll_event_loop(cx, false); let mut scope = self.handle_scope(); let local = v8::Local::<v8::Value>::new(&mut scope, &global); if let Ok(promise) = v8::Local::<v8::Promise>::try_from(local) { match promise.state() { v8::PromiseState::Pending => match state { Poll::Ready(Ok(_)) => { let msg = "Promise resolution is still pending but the event loop has already resolved."; Poll::Ready(Err(generic_error(msg))) }, Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Pending => Poll::Pending, }, v8::PromiseState::Fulfilled => { let value = promise.result(&mut scope); let value_handle = v8::Global::new(&mut scope, value); Poll::Ready(Ok(value_handle)) } v8::PromiseState::Rejected => { let exception = promise.result(&mut scope); Poll::Ready(exception_to_err_result(&mut scope, exception, false)) } } } else { let value_handle = v8::Global::new(&mut scope, local); Poll::Ready(Ok(value_handle)) } }) .await } /// Runs event loop to completion /// /// This future resolves when: /// - there are no more pending dynamic imports /// - there are no more pending ops /// - there are no more active inspector sessions (only if `wait_for_inspector` is set to true) pub async fn run_event_loop( &mut self, wait_for_inspector: bool, ) -> Result<(), Error> { poll_fn(|cx| self.poll_event_loop(cx, wait_for_inspector)).await } /// Runs a single tick of event loop /// /// If `wait_for_inspector` is set to true event loop /// will return `Poll::Pending` if there are active inspector sessions. 
pub fn poll_event_loop( &mut self, cx: &mut Context, wait_for_inspector: bool, ) -> Poll<Result<(), Error>> { // We always poll the inspector first let _ = self.inspector().poll_unpin(cx); let state_rc = Self::state(self.v8_isolate()); let module_map_rc = Self::module_map(self.v8_isolate()); { let state = state_rc.borrow(); state.waker.register(cx.waker()); } self.pump_v8_message_loop(); // Ops { self.resolve_async_ops(cx)?; self.drain_nexttick()?; self.drain_macrotasks()?; self.check_promise_exceptions()?; } // Dynamic module loading - ie. modules loaded using "import()" { let poll_imports = self.prepare_dyn_imports(cx)?; assert!(poll_imports.is_ready()); let poll_imports = self.poll_dyn_imports(cx)?; assert!(poll_imports.is_ready()); self.evaluate_dyn_imports(); self.check_promise_exceptions()?; } // Top level module self.evaluate_pending_module(); let mut state = state_rc.borrow_mut(); let module_map = module_map_rc.borrow(); let has_pending_refed_ops = state.pending_ops.len() > state.unrefed_ops.len(); let has_pending_dyn_imports = module_map.has_pending_dynamic_imports(); let has_pending_dyn_module_evaluation = !state.pending_dyn_mod_evaluate.is_empty(); let has_pending_module_evaluation = state.pending_mod_evaluate.is_some(); let has_pending_background_tasks = self.v8_isolate().has_pending_background_tasks(); let has_tick_scheduled = state.has_tick_scheduled; let inspector_has_active_sessions = self .inspector .as_ref() .map(|i| i.has_active_sessions()) .unwrap_or(false); if !has_pending_refed_ops && !has_pending_dyn_imports && !has_pending_dyn_module_evaluation && !has_pending_module_evaluation && !has_pending_background_tasks && !has_tick_scheduled { if wait_for_inspector && inspector_has_active_sessions { return Poll::Pending; } return Poll::Ready(Ok(())); } // Check if more async ops have been dispatched // during this turn of event loop. // If there are any pending background tasks, we also wake the runtime to // make sure we don't miss them. 
// TODO(andreubotella) The event loop will spin as long as there are pending // background tasks. We should look into having V8 notify us when a // background task is done. if state.have_unpolled_ops || has_pending_background_tasks { state.waker.wake(); } if has_pending_module_evaluation { if has_pending_refed_ops || has_pending_dyn_imports || has_pending_dyn_module_evaluation || has_pending_background_tasks { // pass, will be polled again } else { let msg = "Module evaluation is still pending but there are no pending ops or dynamic imports. This situation is often caused by unresolved promise."; return Poll::Ready(Err(generic_error(msg))); } } if has_pending_dyn_module_evaluation { if has_pending_refed_ops || has_pending_dyn_imports || has_pending_background_tasks { // pass, will be polled again } else if state.dyn_module_evaluate_idle_counter >= 1 { let mut msg = "Dynamically imported module evaluation is still pending but there are no pending ops. This situation is often caused by unresolved promise. Pending dynamic modules:\n".to_string(); for pending_evaluate in &state.pending_dyn_mod_evaluate { let module_info = module_map .get_info_by_id(&pending_evaluate.module_id) .unwrap(); msg.push_str(&format!("- {}", module_info.name.as_str())); } return Poll::Ready(Err(generic_error(msg))); } else { // Delay the above error by one spin of the event loop. A dynamic import // evaluation may complete during this, in which case the counter will // reset. state.dyn_module_evaluate_idle_counter += 1; state.waker.wake(); } } Poll::Pending } } extern "C" fn near_heap_limit_callback<F>( data: *mut c_void, current_heap_limit: usize, initial_heap_limit: usize, ) -> usize where F: FnMut(usize, usize) -> usize, { let callback = unsafe { &mut *(data as *mut F) }; callback(current_heap_limit, initial_heap_limit) } impl JsRuntimeState { /// Called by `bindings::host_import_module_dynamically_callback` /// after initiating new dynamic import load. 
pub fn notify_new_dynamic_import(&mut self) {
    // Notify event loop to poll again soon.
    self.waker.wake();
  }
}

/// Converts a pending v8 exception into a Rust `Err`, routing it through the
/// runtime's configured `js_error_create_fn` hook.
///
/// If `TerminateExecution` was requested on the isolate, termination is
/// temporarily cancelled so the exception object can be inspected, then
/// re-enabled before returning. When `in_promise` is true the error message
/// is rewritten into the "Uncaught (in promise)" form.
pub(crate) fn exception_to_err_result<'s, T>(
  scope: &mut v8::HandleScope<'s>,
  exception: v8::Local<v8::Value>,
  in_promise: bool,
) -> Result<T, Error> {
  let is_terminating_exception = scope.is_execution_terminating();
  let mut exception = exception;

  if is_terminating_exception {
    // TerminateExecution was called. Cancel exception termination so that the
    // exception can be created.
    scope.cancel_terminate_execution();

    // Maybe make a new exception object: a terminating isolate may hand us a
    // null/undefined placeholder instead of a real error value.
    if exception.is_null_or_undefined() {
      let message = v8::String::new(scope, "execution terminated").unwrap();
      exception = v8::Exception::error(scope, message);
    }
  }

  let mut js_error = JsError::from_v8_exception(scope, exception);
  if in_promise {
    js_error.message = format!(
      "Uncaught (in promise) {}",
      js_error.message.trim_start_matches("Uncaught ")
    );
  }

  let state_rc = JsRuntime::state(scope);
  let state = state_rc.borrow();
  // Let the embedder map/wrap the error via its configured hook.
  let js_error = (state.js_error_create_fn)(js_error);

  if is_terminating_exception {
    // Re-enable exception termination.
    scope.terminate_execution();
  }

  Err(js_error)
}

// Related to module loading
impl JsRuntime {
  /// Instantiates the module identified by `id`, resolving its imports via
  /// `bindings::module_resolve_callback`.
  ///
  /// Returns the module's stored exception as an `Err` if the module is
  /// already in the `Errored` state, or the thrown exception if
  /// instantiation itself fails. Panics if `id` is unknown to the
  /// `ModuleMap`.
  pub(crate) fn instantiate_module(
    &mut self,
    id: ModuleId,
  ) -> Result<(), Error> {
    let module_map_rc = Self::module_map(self.v8_isolate());
    let scope = &mut self.handle_scope();
    let tc_scope = &mut v8::TryCatch::new(scope);

    let module = module_map_rc
      .borrow()
      .get_handle(id)
      .map(|handle| v8::Local::new(tc_scope, handle))
      .expect("ModuleInfo not found");

    if module.get_status() == v8::ModuleStatus::Errored {
      let exception = module.get_exception();
      let err = exception_to_err_result(tc_scope, exception, false)
        .map_err(|err| attach_handle_to_error(tc_scope, err, exception));
      return err;
    }

    // IMPORTANT: No borrows to `ModuleMap` can be held at this point because
    // `module_resolve_callback` will be calling into `ModuleMap` from within
    // the isolate.
    let instantiate_result =
      module.instantiate_module(tc_scope, bindings::module_resolve_callback);

    if instantiate_result.is_none() {
      let exception = tc_scope.exception().unwrap();
      let err = exception_to_err_result(tc_scope, exception, false)
        .map_err(|err| attach_handle_to_error(tc_scope, err, exception));
      return err;
    }

    Ok(())
  }

  /// Starts evaluation of a dynamically imported module and records the
  /// resulting promise in `state.pending_dyn_mod_evaluate`, so the event
  /// loop can later observe its settlement (see `evaluate_dyn_imports`).
  ///
  /// A no-op for modules that are neither `Instantiated` nor `Evaluated`.
  fn dynamic_import_module_evaluate(
    &mut self,
    load_id: ModuleLoadId,
    id: ModuleId,
  ) -> Result<(), Error> {
    let state_rc = Self::state(self.v8_isolate());
    let module_map_rc = Self::module_map(self.v8_isolate());

    let module_handle = module_map_rc
      .borrow()
      .get_handle(id)
      .expect("ModuleInfo not found");

    let status = {
      let scope = &mut self.handle_scope();
      let module = module_handle.open(scope);
      module.get_status()
    };

    match status {
      v8::ModuleStatus::Instantiated | v8::ModuleStatus::Evaluated => {}
      _ => return Ok(()),
    }

    // IMPORTANT: Top-level-await is enabled, which means that return value
    // of module evaluation is a promise.
    //
    // This promise is internal, and not the same one that gets returned to
    // the user. We add an empty `.catch()` handler so that it does not result
    // in an exception if it rejects. That will instead happen for the other
    // promise if not handled by the user.
    //
    // For more details see:
    // https://github.com/denoland/deno/issues/4908
    // https://v8.dev/features/top-level-await#module-execution-order
    let scope = &mut self.handle_scope();
    let tc_scope = &mut v8::TryCatch::new(scope);
    let module = v8::Local::new(tc_scope, &module_handle);
    let maybe_value = module.evaluate(tc_scope);

    // Update status after evaluating.
    let status = module.get_status();

    if let Some(value) = maybe_value {
      assert!(
        status == v8::ModuleStatus::Evaluated
          || status == v8::ModuleStatus::Errored
      );
      let promise = v8::Local::<v8::Promise>::try_from(value)
        .expect("Expected to get promise as module evaluation result");
      // Attach a no-op `.catch()` so a rejection of this internal promise
      // doesn't surface as an unhandled rejection (see comment above).
      let empty_fn = |_scope: &mut v8::HandleScope,
                      _args: v8::FunctionCallbackArguments,
                      _rv: v8::ReturnValue| {};
      let empty_fn = v8::FunctionTemplate::new(tc_scope, empty_fn);
      let empty_fn = empty_fn.get_function(tc_scope).unwrap();
      promise.catch(tc_scope, empty_fn);
      let mut state = state_rc.borrow_mut();
      let promise_global = v8::Global::new(tc_scope, promise);
      let module_global = v8::Global::new(tc_scope, module);

      let dyn_import_mod_evaluate = DynImportModEvaluate {
        load_id,
        module_id: id,
        promise: promise_global,
        module: module_global,
      };

      state.pending_dyn_mod_evaluate.push(dyn_import_mod_evaluate);
    } else if tc_scope.has_terminated() || tc_scope.is_execution_terminating() {
      return Err(
        generic_error("Cannot evaluate dynamically imported module, because JavaScript execution has been terminated.")
      );
    } else {
      assert!(status == v8::ModuleStatus::Errored);
    }

    Ok(())
  }

  // TODO(bartlomieju): make it return `ModuleEvaluationFuture`?
  /// Evaluates an already instantiated ES module.
  ///
  /// Returns a receiver handle that resolves when module promise resolves.
  /// Implementors must manually call `run_event_loop()` to drive module
  /// evaluation future.
  ///
  /// `Error` can be downcast to a type that exposes additional information
  /// about the V8 exception. By default this type is `JsError`, however it may
  /// be a different type if `RuntimeOptions::js_error_create_fn` has been set.
  ///
  /// This function panics if module has not been instantiated.
  pub fn mod_evaluate(
    &mut self,
    id: ModuleId,
  ) -> oneshot::Receiver<Result<(), Error>> {
    let state_rc = Self::state(self.v8_isolate());
    let module_map_rc = Self::module_map(self.v8_isolate());
    let scope = &mut self.handle_scope();
    let tc_scope = &mut v8::TryCatch::new(scope);

    let module = module_map_rc
      .borrow()
      .get_handle(id)
      .map(|handle| v8::Local::new(tc_scope, handle))
      .expect("ModuleInfo not found");
    let mut status = module.get_status();
    assert_eq!(status, v8::ModuleStatus::Instantiated);

    // The receiver side of this channel is handed back to the caller; the
    // sender is stashed in `state.pending_mod_evaluate` (or used immediately
    // below on termination).
    let (sender, receiver) = oneshot::channel();

    // IMPORTANT: Top-level-await is enabled, which means that return value
    // of module evaluation is a promise.
    //
    // Because that promise is created internally by V8, when error occurs during
    // module evaluation the promise is rejected, and since the promise has no rejection
    // handler it will result in call to `bindings::promise_reject_callback` adding
    // the promise to pending promise rejection table - meaning JsRuntime will return
    // error on next poll().
    //
    // This situation is not desirable as we want to manually return error at the
    // end of this function to handle it further. It means we need to manually
    // remove this promise from pending promise rejection table.
    //
    // For more details see:
    // https://github.com/denoland/deno/issues/4908
    // https://v8.dev/features/top-level-await#module-execution-order
    let maybe_value = module.evaluate(tc_scope);

    // Update status after evaluating.
    status = module.get_status();

    if let Some(value) = maybe_value {
      assert!(
        status == v8::ModuleStatus::Evaluated
          || status == v8::ModuleStatus::Errored
      );
      let promise = v8::Local::<v8::Promise>::try_from(value)
        .expect("Expected to get promise as module evaluation result");
      let promise_global = v8::Global::new(tc_scope, promise);
      let mut state = state_rc.borrow_mut();
      // See the big comment above: undo the pending-rejection bookkeeping
      // done by `bindings::promise_reject_callback`.
      state.pending_promise_exceptions.remove(&promise_global);
      // NOTE(review): this second `Global` looks redundant — the handle
      // created above is only borrowed by `remove` and is still live.
      let promise_global = v8::Global::new(tc_scope, promise);
      assert!(
        state.pending_mod_evaluate.is_none(),
        "There is already pending top level module evaluation"
      );

      state.pending_mod_evaluate = Some(ModEvaluate {
        promise: promise_global,
        sender,
      });
      tc_scope.perform_microtask_checkpoint();
    } else if tc_scope.has_terminated() || tc_scope.is_execution_terminating() {
      sender.send(Err(
        generic_error("Cannot evaluate module, because JavaScript execution has been terminated.")
      )).expect("Failed to send module evaluation error.");
    } else {
      assert!(status == v8::ModuleStatus::Errored);
    }

    receiver
  }

  /// Rejects the promise associated with dynamic import `id` using `err`,
  /// removing the resolver from `dynamic_import_map`.
  fn dynamic_import_reject(&mut self, id: ModuleLoadId, err: Error) {
    let module_map_rc = Self::module_map(self.v8_isolate());
    let scope = &mut self.handle_scope();

    let resolver_handle = module_map_rc
      .borrow_mut()
      .dynamic_import_map
      .remove(&id)
      .expect("Invalid dynamic import id");
    let resolver = resolver_handle.open(scope);

    // Prefer the original v8 exception handle if the error carries one;
    // otherwise synthesize a TypeError from the error's string form.
    let exception = err
      .downcast_ref::<ErrWithV8Handle>()
      .map(|err| err.get_handle(scope))
      .unwrap_or_else(|| {
        let message = err.to_string();
        let message = v8::String::new(scope, &message).unwrap();
        v8::Exception::type_error(scope, message)
      });

    // IMPORTANT: No borrows to `ModuleMap` can be held at this point because
    // rejecting the promise might initiate another `import()` which will
    // in turn call `bindings::host_import_module_dynamically_callback` which
    // will reach into `ModuleMap` from within the isolate.
    resolver.reject(scope, exception).unwrap();
    scope.perform_microtask_checkpoint();
  }

  /// Resolves the promise associated with dynamic import `id` with the
  /// namespace object of the (already evaluated) module `mod_id`.
  fn dynamic_import_resolve(&mut self, id: ModuleLoadId, mod_id: ModuleId) {
    let state_rc = Self::state(self.v8_isolate());
    let module_map_rc = Self::module_map(self.v8_isolate());
    let scope = &mut self.handle_scope();

    let resolver_handle = module_map_rc
      .borrow_mut()
      .dynamic_import_map
      .remove(&id)
      .expect("Invalid dynamic import id");
    let resolver = resolver_handle.open(scope);

    let module = {
      module_map_rc
        .borrow()
        .get_handle(mod_id)
        .map(|handle| v8::Local::new(scope, handle))
        .expect("Dyn import module info not found")
    };
    // Resolution success
    assert_eq!(module.get_status(), v8::ModuleStatus::Evaluated);

    // IMPORTANT: No borrows to `ModuleMap` can be held at this point because
    // resolving the promise might initiate another `import()` which will
    // in turn call `bindings::host_import_module_dynamically_callback` which
    // will reach into `ModuleMap` from within the isolate.
    let module_namespace = module.get_module_namespace();
    resolver.resolve(scope, module_namespace).unwrap();
    state_rc.borrow_mut().dyn_module_evaluate_idle_counter = 0;
    scope.perform_microtask_checkpoint();
  }

  /// Polls the "prepare" stage of all in-flight dynamic imports, moving each
  /// prepared load into `pending_dynamic_imports` (or rejecting it on
  /// failure). Always returns `Ready(Ok(()))` once no more items are ready.
  fn prepare_dyn_imports(
    &mut self,
    cx: &mut Context,
  ) -> Poll<Result<(), Error>> {
    let module_map_rc = Self::module_map(self.v8_isolate());

    if module_map_rc.borrow().preparing_dynamic_imports.is_empty() {
      return Poll::Ready(Ok(()));
    }

    loop {
      let poll_result = module_map_rc
        .borrow_mut()
        .preparing_dynamic_imports
        .poll_next_unpin(cx);

      if let Poll::Ready(Some(prepare_poll)) = poll_result {
        let dyn_import_id = prepare_poll.0;
        let prepare_result = prepare_poll.1;

        match prepare_result {
          Ok(load) => {
            module_map_rc
              .borrow_mut()
              .pending_dynamic_imports
              .push(load.into_future());
          }
          Err(err) => {
            self.dynamic_import_reject(dyn_import_id, err);
          }
        }
        // Continue polling for more prepared dynamic imports.
        continue;
      }

      // There are no active dynamic import loads, or none are ready.
      return Poll::Ready(Ok(()));
    }
  }

  /// Polls the recursive-load stage of all in-flight dynamic imports:
  /// registers each fetched module source, and once a load's root module is
  /// fully loaded, instantiates and starts evaluating it.
  fn poll_dyn_imports(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
    let module_map_rc = Self::module_map(self.v8_isolate());
    if module_map_rc.borrow().pending_dynamic_imports.is_empty() {
      return Poll::Ready(Ok(()));
    }

    loop {
      let poll_result = module_map_rc
        .borrow_mut()
        .pending_dynamic_imports
        .poll_next_unpin(cx);

      if let Poll::Ready(Some(load_stream_poll)) = poll_result {
        let maybe_result = load_stream_poll.0;
        let mut load = load_stream_poll.1;
        let dyn_import_id = load.id;

        if let Some(load_stream_result) = maybe_result {
          match load_stream_result {
            Ok(info) => {
              // A module (not necessarily the one dynamically imported) has been
              // fetched. Create and register it, and if successful, poll for the
              // next recursive-load event related to this dynamic import.
              let register_result =
                load.register_and_recurse(&mut self.handle_scope(), &info);

              match register_result {
                Ok(()) => {
                  // Keep importing until it's fully drained
                  module_map_rc
                    .borrow_mut()
                    .pending_dynamic_imports
                    .push(load.into_future());
                }
                Err(err) => self.dynamic_import_reject(dyn_import_id, err),
              }
            }
            Err(err) => {
              // A non-javascript error occurred; this could be due to an invalid
              // module specifier, or a problem with the source map, or a failure
              // to fetch the module source code.
              self.dynamic_import_reject(dyn_import_id, err)
            }
          }
        } else {
          // The top-level module from a dynamic import has been instantiated.
          // Load is done.
          let module_id =
            load.root_module_id.expect("Root module should be loaded");
          let result = self.instantiate_module(module_id);
          if let Err(err) = result {
            self.dynamic_import_reject(dyn_import_id, err);
          }
          self.dynamic_import_module_evaluate(dyn_import_id, module_id)?;
        }

        // Continue polling for more ready dynamic imports.
        continue;
      }

      // There are no active dynamic import loads, or none are ready.
      return Poll::Ready(Ok(()));
    }
  }

  /// "deno_core" runs V8 with Top Level Await enabled. It means that each
  /// module evaluation returns a promise from V8.
  /// Feature docs: https://v8.dev/features/top-level-await
  ///
  /// This promise resolves after all dependent modules have also
  /// resolved. Each dependent module may perform calls to "import()" and APIs
  /// using async ops will add futures to the runtime's event loop.
  /// It means that the promise returned from module evaluation will
  /// resolve only after all futures in the event loop are done.
  ///
  /// Thus during turn of event loop we need to check if V8 has
  /// resolved or rejected the promise. If the promise is still pending
  /// then another turn of event loop must be performed.
  fn evaluate_pending_module(&mut self) {
    let state_rc = Self::state(self.v8_isolate());

    let maybe_module_evaluation =
      state_rc.borrow_mut().pending_mod_evaluate.take();

    if maybe_module_evaluation.is_none() {
      return;
    }

    let module_evaluation = maybe_module_evaluation.unwrap();
    let scope = &mut self.handle_scope();

    let promise = module_evaluation.promise.open(scope);
    let promise_state = promise.state();

    match promise_state {
      v8::PromiseState::Pending => {
        // NOTE: `poll_event_loop` will decide if
        // runtime would be woken soon
        state_rc.borrow_mut().pending_mod_evaluate = Some(module_evaluation);
      }
      v8::PromiseState::Fulfilled => {
        scope.perform_microtask_checkpoint();
        // Receiver end might have been already dropped, ignore the result
        let _ = module_evaluation.sender.send(Ok(()));
      }
      v8::PromiseState::Rejected => {
        let exception = promise.result(scope);
        scope.perform_microtask_checkpoint();
        let err1 = exception_to_err_result::<()>(scope, exception, false)
          .map_err(|err| attach_handle_to_error(scope, err, exception))
          .unwrap_err();
        // Receiver end might have been already dropped, ignore the result
        let _ = module_evaluation.sender.send(Err(err1));
      }
    }
  }

  /// Checks every pending dynamic-import evaluation promise; resolves or
  /// rejects the corresponding user-visible `import()` promise once it
  /// settles, and re-queues the ones that are still pending.
  fn evaluate_dyn_imports(&mut self) {
    let state_rc = Self::state(self.v8_isolate());
    let mut still_pending = vec![];
    let pending =
      std::mem::take(&mut state_rc.borrow_mut().pending_dyn_mod_evaluate);
    for pending_dyn_evaluate in pending {
      // Inspect the promise inside its own handle scope; the scope must be
      // dropped before `dynamic_import_resolve`/`_reject` re-enter v8 below.
      let maybe_result = {
        let scope = &mut self.handle_scope();

        let module_id = pending_dyn_evaluate.module_id;
        let promise = pending_dyn_evaluate.promise.open(scope);
        let _module = pending_dyn_evaluate.module.open(scope);
        let promise_state = promise.state();

        match promise_state {
          v8::PromiseState::Pending => {
            still_pending.push(pending_dyn_evaluate);
            None
          }
          v8::PromiseState::Fulfilled => {
            Some(Ok((pending_dyn_evaluate.load_id, module_id)))
          }
          v8::PromiseState::Rejected => {
            let exception = promise.result(scope);
            let err1 = exception_to_err_result::<()>(scope, exception, false)
              .map_err(|err| attach_handle_to_error(scope, err, exception))
              .unwrap_err();
            Some(Err((pending_dyn_evaluate.load_id, err1)))
          }
        }
      };

      if let Some(result) = maybe_result {
        match result {
          Ok((dyn_import_id, module_id)) => {
            self.dynamic_import_resolve(dyn_import_id, module_id);
          }
          Err((dyn_import_id, err1)) => {
            self.dynamic_import_reject(dyn_import_id, err1);
          }
        }
      }
    }
    state_rc.borrow_mut().pending_dyn_mod_evaluate = still_pending;
  }

  /// Asynchronously load specified module and all of its dependencies.
  ///
  /// The module will be marked as "main", and because of that
  /// "import.meta.main" will return true when checked inside that module.
  ///
  /// User must call `JsRuntime::mod_evaluate` with returned `ModuleId`
  /// manually after load is finished.
pub async fn load_main_module( &mut self, specifier: &ModuleSpecifier, code: Option<String>, ) -> Result<ModuleId, Error> { let module_map_rc = Self::module_map(self.v8_isolate()); if let Some(code) = code { module_map_rc.borrow_mut().new_module( &mut self.handle_scope(), // main module true, specifier.as_str(), &code, )?; } let mut load = ModuleMap::load_main(module_map_rc.clone(), specifier.as_str()).await?; while let Some(info_result) = load.next().await { let info = info_result?; let scope = &mut self.handle_scope(); load.register_and_recurse(scope, &info)?; } let root_id = load.root_module_id.expect("Root module should be loaded"); self.instantiate_module(root_id)?; Ok(root_id) } /// Asynchronously load specified ES module and all of its dependencies. /// /// This method is meant to be used when loading some utility code that /// might be later imported by the main module (ie. an entry point module). /// /// User must call `JsRuntime::mod_evaluate` with returned `ModuleId` /// manually after load is finished. 
  pub async fn load_side_module(
    &mut self,
    specifier: &ModuleSpecifier,
    code: Option<String>,
  ) -> Result<ModuleId, Error> {
    let module_map_rc = Self::module_map(self.v8_isolate());
    if let Some(code) = code {
      module_map_rc.borrow_mut().new_module(
        &mut self.handle_scope(),
        // not main module
        false,
        specifier.as_str(),
        &code,
      )?;
    }

    let mut load =
      ModuleMap::load_side(module_map_rc.clone(), specifier.as_str()).await?;

    while let Some(info_result) = load.next().await {
      let info = info_result?;
      let scope = &mut self.handle_scope();
      load.register_and_recurse(scope, &info)?;
    }

    let root_id = load.root_module_id.expect("Root module should be loaded");
    self.instantiate_module(root_id)?;
    Ok(root_id)
  }

  /// Pops one pending promise-rejection exception (if any) and converts it
  /// into an `Err` via `exception_to_err_result` with `in_promise == true`.
  fn check_promise_exceptions(&mut self) -> Result<(), Error> {
    let state_rc = Self::state(self.v8_isolate());
    let mut state = state_rc.borrow_mut();

    if state.pending_promise_exceptions.is_empty() {
      return Ok(());
    }

    let key = {
      state
        .pending_promise_exceptions
        .keys()
        .next()
        .unwrap()
        .clone()
    };
    let handle = state.pending_promise_exceptions.remove(&key).unwrap();
    // Release the state borrow before touching v8 below.
    drop(state);

    let scope = &mut self.handle_scope();
    let exception = v8::Local::new(scope, handle);
    exception_to_err_result(scope, exception, true)
  }

  // Send finished responses to JS
  fn resolve_async_ops(&mut self, cx: &mut Context) -> Result<(), Error> {
    let state_rc = Self::state(self.v8_isolate());

    let js_recv_cb_handle = state_rc.borrow().js_recv_cb.clone().unwrap();
    let scope = &mut self.handle_scope();

    // We return async responses to JS in unbounded batches (may change),
    // each batch is a flat vector of tuples:
    // `[promise_id1, op_result1, promise_id2, op_result2, ...]`
    // promise_id is a simple integer, op_result is an ops::OpResult
    // which contains a value OR an error, encoded as a tuple.
    // This batch is received in JS via the special `arguments` variable
    // and then each tuple is used to resolve or reject promises
    let mut args: Vec<v8::Local<v8::Value>> = vec![];

    // Now handle actual ops.
    {
      let mut state = state_rc.borrow_mut();
      state.have_unpolled_ops = false;

      let op_state = state.op_state.clone();

      while let Poll::Ready(Some(item)) = state.pending_ops.poll_next_unpin(cx)
      {
        let (promise_id, op_id, resp) = item;
        op_state.borrow().tracker.track_async_completed(op_id);
        state.unrefed_ops.remove(&promise_id);
        args.push(v8::Integer::new(scope, promise_id as i32).into());
        args.push(resp.to_v8(scope).unwrap());
      }
    }

    if args.is_empty() {
      return Ok(());
    }

    let tc_scope = &mut v8::TryCatch::new(scope);
    let js_recv_cb = js_recv_cb_handle.open(tc_scope);
    let this = v8::undefined(tc_scope).into();
    js_recv_cb.call(tc_scope, this, args.as_slice());

    match tc_scope.exception() {
      None => Ok(()),
      Some(exception) => exception_to_err_result(tc_scope, exception, false),
    }
  }

  /// Invokes every registered JS macrotask callback until each reports it is
  /// done, surfacing any thrown exception as an `Err`.
  fn drain_macrotasks(&mut self) -> Result<(), Error> {
    let state = Self::state(self.v8_isolate());

    if state.borrow().js_macrotask_cbs.is_empty() {
      return Ok(());
    }

    let js_macrotask_cb_handles = state.borrow().js_macrotask_cbs.clone();
    let scope = &mut self.handle_scope();

    for js_macrotask_cb_handle in js_macrotask_cb_handles {
      let js_macrotask_cb = js_macrotask_cb_handle.open(scope);

      // Repeatedly invoke macrotask callback until it returns true (done),
      // such that ready microtasks would be automatically run before
      // next macrotask is processed.
      let tc_scope = &mut v8::TryCatch::new(scope);
      let this = v8::undefined(tc_scope).into();
      loop {
        let is_done = js_macrotask_cb.call(tc_scope, this, &[]);

        if let Some(exception) = tc_scope.exception() {
          return exception_to_err_result(tc_scope, exception, false);
        }

        // Bail out quietly if the isolate is being terminated.
        if tc_scope.has_terminated() || tc_scope.is_execution_terminating() {
          return Ok(());
        }

        let is_done = is_done.unwrap();
        if is_done.is_true() {
          break;
        }
      }
    }

    Ok(())
  }

  /// Runs all registered `nextTick` callbacks, if a tick has been scheduled
  /// (checked again after a microtask checkpoint).
  fn drain_nexttick(&mut self) -> Result<(), Error> {
    let state = Self::state(self.v8_isolate());

    if state.borrow().js_nexttick_cbs.is_empty() {
      return Ok(());
    }

    if !state.borrow().has_tick_scheduled {
      let scope = &mut self.handle_scope();
      scope.perform_microtask_checkpoint();
    }

    // TODO(bartlomieju): Node also checks for absence of "rejection_to_warn"
    if !state.borrow().has_tick_scheduled {
      return Ok(());
    }

    let js_nexttick_cb_handles = state.borrow().js_nexttick_cbs.clone();
    let scope = &mut self.handle_scope();

    for js_nexttick_cb_handle in js_nexttick_cb_handles {
      let js_nexttick_cb = js_nexttick_cb_handle.open(scope);

      let tc_scope = &mut v8::TryCatch::new(scope);
      let this = v8::undefined(tc_scope).into();
      js_nexttick_cb.call(tc_scope, this, &[]);

      if let Some(exception) = tc_scope.exception() {
        return exception_to_err_result(tc_scope, exception, false);
      }
    }

    Ok(())
  }
}

#[cfg(test)]
pub mod tests {
  use super::*;
  use crate::error::custom_error;
  use crate::modules::ModuleSource;
  use crate::modules::ModuleSourceFuture;
  use crate::op_async;
  use crate::op_sync;
  use crate::ZeroCopyBuf;
  use futures::future::lazy;
  use std::ops::FnOnce;
  use std::pin::Pin;
  use std::rc::Rc;
  use std::sync::atomic::{AtomicUsize, Ordering};
  use std::sync::Arc;

  // Runs `f` once inside a minimal future context so it can poll the event
  // loop with a real `&mut Context`.
  pub fn run_in_task<F>(f: F)
  where
    F: FnOnce(&mut Context) + Send + 'static,
  {
    futures::executor::block_on(lazy(move |cx| f(cx)));
  }

  enum Mode {
    Async,
    AsyncZeroCopy(bool),
  }

  struct TestState {
    mode: Mode,
    // Incremented on every dispatch so tests can count op invocations.
    dispatch_count: Arc<AtomicUsize>,
  }

  // Test op handler: counts dispatches and returns a ready async response of
  // `43`, validating payload shape according to the configured `Mode`.
  fn dispatch(rc_op_state: Rc<RefCell<OpState>>, payload: OpPayload) -> Op {
    let rc_op_state2 = rc_op_state.clone();
    let op_state_ = rc_op_state2.borrow();
    let test_state = op_state_.borrow::<TestState>();
    test_state.dispatch_count.fetch_add(1, Ordering::Relaxed);
    let (control, buf): (u8, Option<ZeroCopyBuf>) =
      payload.deserialize().unwrap();
    match test_state.mode {
      Mode::Async => {
        assert_eq!(control, 42);
        let resp = (0, 1, serialize_op_result(Ok(43), rc_op_state));
        Op::Async(OpCall::ready(resp))
      }
      Mode::AsyncZeroCopy(has_buffer) => {
        assert_eq!(buf.is_some(), has_buffer);
        if let Some(buf) = buf {
          assert_eq!(buf.len(), 1);
        }
        let resp = (0, 1, serialize_op_result(Ok(43), rc_op_state));
        Op::Async(OpCall::ready(resp))
      }
    }
  }

  // Creates a runtime with the `op_test` op registered in the given mode,
  // plus a JS-side `assert` helper.
  fn setup(mode: Mode) -> (JsRuntime, Arc<AtomicUsize>) {
    let dispatch_count = Arc::new(AtomicUsize::new(0));
    let mut runtime = JsRuntime::new(Default::default());
    let op_state = runtime.op_state();
    op_state.borrow_mut().put(TestState {
      mode,
      dispatch_count: dispatch_count.clone(),
    });
    runtime.register_op("op_test", dispatch);
    runtime.sync_ops_cache();

    runtime
      .execute_script(
        "setup.js",
        r#"
        function assert(cond) {
          if (!cond) {
            throw Error("assert");
          }
        }
        "#,
      )
      .unwrap();
    assert_eq!(dispatch_count.load(Ordering::Relaxed), 0);
    (runtime, dispatch_count)
  }

  #[test]
  fn test_dispatch() {
    let (mut runtime, dispatch_count) = setup(Mode::Async);
    runtime
      .execute_script(
        "filename.js",
        r#"
        let control = 42;
        Deno.core.opAsync("op_test", control);
        async function main() {
          Deno.core.opAsync("op_test", control);
        }
        main();
        "#,
      )
      .unwrap();
    assert_eq!(dispatch_count.load(Ordering::Relaxed), 2);
  }

  #[test]
  fn test_op_async_promise_id() {
    let (mut runtime, _dispatch_count) = setup(Mode::Async);
    runtime
      .execute_script(
        "filename.js",
        r#"
        const p = Deno.core.opAsync("op_test", 42);
        if (p[Symbol.for("Deno.core.internalPromiseId")] == undefined) {
          throw new Error("missing id on returned promise");
        }
        "#,
      )
      .unwrap();
  }

  #[test]
  fn test_ref_unref_ops() {
    let (mut runtime, _dispatch_count) = setup(Mode::Async);
    runtime
      .execute_script(
        "filename.js",
        r#"
        var promiseIdSymbol = Symbol.for("Deno.core.internalPromiseId");
        var p1 = Deno.core.opAsync("op_test", 42);
        var p2 = Deno.core.opAsync("op_test", 42);
        "#,
      )
      .unwrap();
    {
      let isolate = runtime.v8_isolate();
      let state_rc = JsRuntime::state(isolate);
      let state = state_rc.borrow();
      assert_eq!(state.pending_ops.len(), 2);
      assert_eq!(state.unrefed_ops.len(), 0);
    }
    runtime
      .execute_script(
        "filename.js",
        r#"
        Deno.core.unrefOp(p1[promiseIdSymbol]);
        Deno.core.unrefOp(p2[promiseIdSymbol]);
        "#,
      )
      .unwrap();
    {
      let isolate = runtime.v8_isolate();
      let state_rc = JsRuntime::state(isolate);
      let state = state_rc.borrow();
      assert_eq!(state.pending_ops.len(), 2);
      assert_eq!(state.unrefed_ops.len(), 2);
    }
    runtime
      .execute_script(
        "filename.js",
        r#"
        Deno.core.refOp(p1[promiseIdSymbol]);
        Deno.core.refOp(p2[promiseIdSymbol]);
        "#,
      )
      .unwrap();
    {
      let isolate = runtime.v8_isolate();
      let state_rc = JsRuntime::state(isolate);
      let state = state_rc.borrow();
      assert_eq!(state.pending_ops.len(), 2);
      assert_eq!(state.unrefed_ops.len(), 0);
    }
  }

  #[test]
  fn test_dispatch_no_zero_copy_buf() {
    let (mut runtime, dispatch_count) = setup(Mode::AsyncZeroCopy(false));
    runtime
      .execute_script(
        "filename.js",
        r#"
        Deno.core.opAsync("op_test");
        "#,
      )
      .unwrap();
    assert_eq!(dispatch_count.load(Ordering::Relaxed), 1);
  }

  #[test]
  fn test_dispatch_stack_zero_copy_bufs() {
    let (mut runtime, dispatch_count) = setup(Mode::AsyncZeroCopy(true));
    runtime
      .execute_script(
        "filename.js",
        r#"
        let zero_copy_a = new Uint8Array([0]);
        Deno.core.opAsync("op_test", null, zero_copy_a);
        "#,
      )
      .unwrap();
    assert_eq!(dispatch_count.load(Ordering::Relaxed), 1);
  }

  #[test]
  fn test_execute_script_return_value() {
    let mut runtime = JsRuntime::new(Default::default());
    let value_global = runtime.execute_script("a.js", "a = 1 + 2").unwrap();
    {
      let scope = &mut runtime.handle_scope();
      let value = value_global.open(scope);
      assert_eq!(value.integer_value(scope).unwrap(), 3);
    }
    let value_global = runtime.execute_script("b.js", "b = 'foobar'").unwrap();
    {
      let scope = &mut runtime.handle_scope();
      let value = value_global.open(scope);
      assert!(value.is_string());
      assert_eq!(
        value.to_string(scope).unwrap().to_rust_string_lossy(scope),
        "foobar"
      );
    }
  }

  #[tokio::test]
  async fn test_resolve_value() {
    let mut runtime = JsRuntime::new(Default::default());
    let value_global = runtime
      .execute_script("a.js", "Promise.resolve(1 + 2)")
      .unwrap();
    let result_global = runtime.resolve_value(value_global).await.unwrap();
    {
      let scope = &mut runtime.handle_scope();
      let value = result_global.open(scope);
      assert_eq!(value.integer_value(scope).unwrap(), 3);
    }

    let value_global = runtime
      .execute_script(
        "a.js",
        "Promise.resolve(new Promise(resolve => resolve(2 + 2)))",
      )
      .unwrap();
    let result_global = runtime.resolve_value(value_global).await.unwrap();
    {
      let scope = &mut runtime.handle_scope();
      let value = result_global.open(scope);
      assert_eq!(value.integer_value(scope).unwrap(), 4);
    }

    let value_global = runtime
      .execute_script("a.js", "Promise.reject(new Error('fail'))")
      .unwrap();
    let err = runtime.resolve_value(value_global).await.unwrap_err();
    assert_eq!(
      "Uncaught Error: fail",
      err.downcast::<JsError>().unwrap().message
    );

    let value_global = runtime
      .execute_script("a.js", "new Promise(resolve => {})")
      .unwrap();
    let error_string = runtime
      .resolve_value(value_global)
      .await
      .unwrap_err()
      .to_string();
    assert_eq!(
      "Promise resolution is still pending but the event loop has already resolved.",
      error_string,
    );
  }

  #[test]
  fn terminate_execution() {
    let (mut isolate, _dispatch_count) = setup(Mode::Async);
    // TODO(piscisaureus): in rusty_v8, the `thread_safe_handle()` method
    // should not require a mutable reference to `struct rusty_v8::Isolate`.
    let v8_isolate_handle = isolate.v8_isolate().thread_safe_handle();

    let terminator_thread = std::thread::spawn(move || {
      // allow deno to boot and run
      std::thread::sleep(std::time::Duration::from_millis(100));

      // terminate execution
      let ok = v8_isolate_handle.terminate_execution();
      assert!(ok);
    });

    // Run an infinite loop, which should be terminated.
    match isolate.execute_script("infinite_loop.js", "for(;;) {}") {
      Ok(_) => panic!("execution should be terminated"),
      Err(e) => {
        assert_eq!(e.to_string(), "Uncaught Error: execution terminated")
      }
    };

    // Cancel the execution-terminating exception in order to allow script
    // execution again.
    let ok = isolate.v8_isolate().cancel_terminate_execution();
    assert!(ok);

    // Verify that the isolate is usable again.
    isolate
      .execute_script("simple.js", "1 + 1")
      .expect("execution should be possible again");

    terminator_thread.join().unwrap();
  }

  #[test]
  fn dangling_shared_isolate() {
    let v8_isolate_handle = {
      // isolate is dropped at the end of this block
      let (mut runtime, _dispatch_count) = setup(Mode::Async);
      // TODO(piscisaureus): in rusty_v8, the `thread_safe_handle()` method
      // should not require a mutable reference to `struct rusty_v8::Isolate`.
      runtime.v8_isolate().thread_safe_handle()
    };

    // this should not SEGFAULT
    v8_isolate_handle.terminate_execution();
  }

  #[test]
  fn test_pre_dispatch() {
    run_in_task(|mut cx| {
      let (mut runtime, _dispatch_count) = setup(Mode::Async);
      runtime
        .execute_script(
          "bad_op_id.js",
          r#"
          let thrown;
          try {
            Deno.core.opcallSync(100, null, null);
          } catch (e) {
            thrown = e;
          }
          assert(String(thrown) === "TypeError: Unknown op id: 100");
          "#,
        )
        .unwrap();
      if let Poll::Ready(Err(_)) = runtime.poll_event_loop(&mut cx, false) {
        unreachable!();
      }
    });
  }

  #[test]
  fn syntax_error() {
    let mut runtime = JsRuntime::new(Default::default());
    let src = "hocuspocus(";
    let r = runtime.execute_script("i.js", src);
    let e = r.unwrap_err();
    let js_error = e.downcast::<JsError>().unwrap();
    assert_eq!(js_error.end_column, Some(11));
  }

  #[test]
  fn test_encode_decode() {
    run_in_task(|mut cx| {
      let (mut runtime, _dispatch_count) = setup(Mode::Async);
      runtime
        .execute_script(
          "encode_decode_test.js",
          include_str!("encode_decode_test.js"),
        )
        .unwrap();
      if let Poll::Ready(Err(_)) = runtime.poll_event_loop(&mut cx, false) {
        unreachable!();
      }
    });
  }

  #[test]
  fn test_serialize_deserialize() {
    run_in_task(|mut cx| {
      let (mut runtime, _dispatch_count) = setup(Mode::Async);
      runtime
        .execute_script(
          "serialize_deserialize_test.js",
          include_str!("serialize_deserialize_test.js"),
        )
        .unwrap();
      if let Poll::Ready(Err(_)) = runtime.poll_event_loop(&mut cx, false) {
        unreachable!();
      }
    });
  }

  #[test]
  fn test_error_builder() {
    fn op_err(_: &mut OpState, _: (), _: ()) -> Result<(), Error> {
      Err(custom_error("DOMExceptionOperationError", "abc"))
    }

    pub fn get_error_class_name(_: &Error) -> &'static str {
      "DOMExceptionOperationError"
    }

    run_in_task(|mut cx| {
      let mut runtime = JsRuntime::new(RuntimeOptions {
        get_error_class_fn: Some(&get_error_class_name),
        ..Default::default()
      });
      runtime.register_op("op_err", op_sync(op_err));
      runtime.sync_ops_cache();
      runtime
        .execute_script(
          "error_builder_test.js",
          include_str!("error_builder_test.js"),
        )
        .unwrap();
      if let Poll::Ready(Err(_)) = runtime.poll_event_loop(&mut cx, false) {
        unreachable!();
      }
    });
  }

  #[test]
  fn will_snapshot() {
    let snapshot = {
      let mut runtime = JsRuntime::new(RuntimeOptions {
        will_snapshot: true,
        ..Default::default()
      });
      runtime.execute_script("a.js", "a = 1 + 2").unwrap();
      runtime.snapshot()
    };

    let snapshot = Snapshot::JustCreated(snapshot);
    let mut runtime2 = JsRuntime::new(RuntimeOptions {
      startup_snapshot: Some(snapshot),
      ..Default::default()
    });
    runtime2
      .execute_script("check.js", "if (a != 3) throw Error('x')")
      .unwrap();
  }

  #[test]
  fn test_from_boxed_snapshot() {
    let snapshot = {
      let mut runtime = JsRuntime::new(RuntimeOptions {
        will_snapshot: true,
        ..Default::default()
      });
      runtime.execute_script("a.js", "a = 1 + 2").unwrap();
      let snap: &[u8] = &*runtime.snapshot();
      Vec::from(snap).into_boxed_slice()
    };

    let snapshot = Snapshot::Boxed(snapshot);
    let mut runtime2 = JsRuntime::new(RuntimeOptions {
      startup_snapshot: Some(snapshot),
      ..Default::default()
    });
    runtime2
      .execute_script("check.js", "if (a != 3) throw Error('x')")
      .unwrap();
  }

  #[test]
  fn test_heap_limits() {
    let create_params =
      v8::Isolate::create_params().heap_limits(0, 3 * 1024 * 1024);
    let mut runtime = JsRuntime::new(RuntimeOptions {
      create_params: Some(create_params),
      ..Default::default()
    });
    let cb_handle = runtime.v8_isolate().thread_safe_handle();

    let callback_invoke_count = Rc::new(AtomicUsize::default());
    let inner_invoke_count = Rc::clone(&callback_invoke_count);

    runtime.add_near_heap_limit_callback(
      move |current_limit, _initial_limit| {
        inner_invoke_count.fetch_add(1, Ordering::SeqCst);
        cb_handle.terminate_execution();
        current_limit * 2
      },
    );
    let err = runtime
      .execute_script(
        "script name",
        r#"let s = ""; while(true) { s += "Hello"; }"#,
      )
      .expect_err("script should fail");
    assert_eq!(
      "Uncaught Error: execution terminated",
      err.downcast::<JsError>().unwrap().message
    );
    assert!(callback_invoke_count.load(Ordering::SeqCst) > 0)
  }

  #[test]
  fn test_heap_limit_cb_remove() {
    let mut runtime = JsRuntime::new(Default::default());

    runtime.add_near_heap_limit_callback(|current_limit, _initial_limit| {
      current_limit * 2
    });
    runtime.remove_near_heap_limit_callback(3 * 1024 * 1024);
    assert!(runtime.allocations.near_heap_limit_callback_data.is_none());
  }

  #[test]
  fn test_heap_limit_cb_multiple() {
    let create_params =
      v8::Isolate::create_params().heap_limits(0, 3 * 1024 * 1024);
    let mut runtime = JsRuntime::new(RuntimeOptions {
      create_params: Some(create_params),
      ..Default::default()
    });
    let cb_handle = runtime.v8_isolate().thread_safe_handle();

    let callback_invoke_count_first = Rc::new(AtomicUsize::default());
    let inner_invoke_count_first = Rc::clone(&callback_invoke_count_first);
    runtime.add_near_heap_limit_callback(
      move |current_limit, _initial_limit| {
        inner_invoke_count_first.fetch_add(1, Ordering::SeqCst);
        current_limit * 2
      },
    );

    let callback_invoke_count_second = Rc::new(AtomicUsize::default());
    let inner_invoke_count_second = Rc::clone(&callback_invoke_count_second);
    runtime.add_near_heap_limit_callback(
      move |current_limit, _initial_limit| {
        inner_invoke_count_second.fetch_add(1, Ordering::SeqCst);
        cb_handle.terminate_execution();
        current_limit * 2
      },
    );

    let err = runtime
      .execute_script(
        "script name",
        r#"let s = ""; while(true) { s += "Hello"; }"#,
      )
      .expect_err("script should fail");
    assert_eq!(
      "Uncaught Error: execution terminated",
      err.downcast::<JsError>().unwrap().message
    );
    // Registering a second callback replaces the first one entirely.
    assert_eq!(0, callback_invoke_count_first.load(Ordering::SeqCst));
    assert!(callback_invoke_count_second.load(Ordering::SeqCst) > 0);
  }

  #[test]
  fn es_snapshot() {
    #[derive(Default)]
    struct ModsLoader;

    impl ModuleLoader for ModsLoader {
      fn resolve(
        &self,
        specifier: &str,
        referrer: &str,
        _is_main: bool,
      ) -> Result<ModuleSpecifier, Error> {
        assert_eq!(specifier, "file:///main.js");
        assert_eq!(referrer, ".");
        let s = crate::resolve_import(specifier, referrer).unwrap();
        Ok(s)
      }

      fn load(
        &self,
        _module_specifier: &ModuleSpecifier,
        _maybe_referrer: Option<ModuleSpecifier>,
        _is_dyn_import: bool,
      ) -> Pin<Box<ModuleSourceFuture>> {
        // Source is passed inline to `load_main_module`, so `load` is never
        // reached in this test.
        unreachable!()
      }
    }

    let loader = std::rc::Rc::new(ModsLoader::default());
    let mut runtime = JsRuntime::new(RuntimeOptions {
      module_loader: Some(loader),
      will_snapshot: true,
      ..Default::default()
    });

    let specifier = crate::resolve_url("file:///main.js").unwrap();
    let source_code = "Deno.core.print('hello\\n')".to_string();

    let module_id = futures::executor::block_on(
      runtime.load_main_module(&specifier, Some(source_code)),
    )
    .unwrap();

    let _ = runtime.mod_evaluate(module_id);
    futures::executor::block_on(runtime.run_event_loop(false)).unwrap();

    let _snapshot = runtime.snapshot();
  }

  #[test]
  fn test_error_without_stack() {
    let mut runtime = JsRuntime::new(RuntimeOptions::default());
    // SyntaxError
    let result = runtime.execute_script(
      "error_without_stack.js",
      r#"
function main() {
  console.log("asdf);
}
main();
"#,
    );
    let expected_error = r#"Uncaught SyntaxError: Invalid or unexpected token
    at error_without_stack.js:3:14"#;
    assert_eq!(result.unwrap_err().to_string(), expected_error);
  }

  #[test]
  fn test_error_stack() {
    let mut runtime = JsRuntime::new(RuntimeOptions::default());
    let result = runtime.execute_script(
      "error_stack.js",
      r#"
function assert(cond) {
  if (!cond) {
    throw Error("assert");
  }
}

function main() {
  assert(false);
}

main();
"#,
    );
    let expected_error = r#"Error: assert
    at assert (error_stack.js:4:11)
    at main (error_stack.js:9:3)
    at error_stack.js:12:1"#;
    assert_eq!(result.unwrap_err().to_string(), expected_error);
  }

  #[test]
  fn test_error_async_stack() {
    run_in_task(|cx| {
      let mut runtime = JsRuntime::new(RuntimeOptions::default());
      runtime
        .execute_script(
          "error_async_stack.js",
          r#"
(async () => {
  const p = (async () => {
    await Promise.resolve().then(() => {
      throw new Error("async");
    });
  })();

  try {
    await p;
  } catch (error) {
    console.log(error.stack);
    throw error;
  }
})();"#,
        )
        .unwrap();
      let expected_error = r#"Error: async
    at error_async_stack.js:5:13
    at async error_async_stack.js:4:5
    at async error_async_stack.js:10:5"#;

      match runtime.poll_event_loop(cx, false) {
        Poll::Ready(Err(e)) => {
          assert_eq!(e.to_string(), expected_error);
        }
        _ => panic!(),
      };
    })
  }

  #[test]
  fn test_pump_message_loop() {
    run_in_task(|cx| {
      let mut runtime = JsRuntime::new(RuntimeOptions::default());
      runtime
        .execute_script(
          "pump_message_loop.js",
          r#"
function assertEquals(a, b) {
  if (a === b) return;
  throw a + " does not equal " + b;
}
const sab = new SharedArrayBuffer(16);
const i32a = new Int32Array(sab);
globalThis.resolved = false;
(function() {
  const result = Atomics.waitAsync(i32a, 0, 0);
  result.value.then(
    (value) => { assertEquals("ok", value); globalThis.resolved = true; },
    () => { assertUnreachable();
  });
})();
const notify_return_value = Atomics.notify(i32a, 0, 1);
assertEquals(1, notify_return_value);
"#,
        )
        .unwrap();

      match runtime.poll_event_loop(cx, false) {
        Poll::Ready(Ok(())) => {}
        _ => panic!(),
      };

      // noop script, will resolve promise from first script
      runtime
        .execute_script("pump_message_loop2.js", r#"assertEquals(1, 1);"#)
        .unwrap();

      // check that promise from `Atomics.waitAsync` has been resolved
      runtime
        .execute_script(
          "pump_message_loop3.js",
          r#"assertEquals(globalThis.resolved, true);"#,
        )
        .unwrap();
    })
  }

  #[test]
  fn test_core_js_stack_frame() {
    let mut runtime = JsRuntime::new(RuntimeOptions::default());
    // Call non-existent op so we get error from `core.js`
    let error = runtime
      .execute_script(
        "core_js_stack_frame.js",
        "Deno.core.opSync('non_existent');",
      )
      .unwrap_err();
    let error_string = error.to_string();
    // Test that the script specifier is a URL: `deno:<repo-relative path>`.
    assert!(error_string.contains("deno:core/01_core.js"));
  }

  #[test]
  fn test_v8_platform() {
    let options = RuntimeOptions {
      v8_platform: Some(v8::new_default_platform(0, false).make_shared()),
      ..Default::default()
    };
    let mut runtime = JsRuntime::new(options);
    runtime.execute_script("<none>", "").unwrap();
  }

  #[test]
  fn test_is_proxy() {
    let mut runtime = JsRuntime::new(RuntimeOptions::default());
    let all_true: v8::Global<v8::Value> = runtime
      .execute_script(
        "is_proxy.js",
        r#"
      (function () {
        const { isProxy } = Deno.core;
        const o = { a: 1, b: 2};
        const p = new Proxy(o, {});
        return isProxy(p) && !isProxy(o) && !isProxy(42);
      })()
    "#,
      )
      .unwrap();
    let mut scope = runtime.handle_scope();
    let all_true = v8::Local::<v8::Value>::new(&mut scope, &all_true);
    assert!(all_true.is_true());
  }

  #[test]
  fn test_binding_names() {
    let mut runtime = JsRuntime::new(RuntimeOptions::default());
    let all_true: v8::Global<v8::Value> = runtime
      .execute_script(
        "binding_names.js",
        "Deno.core.encode.toString() === 'function encode() { [native code] }'",
      )
      .unwrap();
    let mut scope = runtime.handle_scope();
    let all_true = v8::Local::<v8::Value>::new(&mut scope, &all_true);
    assert!(all_true.is_true());
  }

  #[tokio::test]
  async fn test_async_opstate_borrow() {
    struct InnerState(u64);

    async fn op_async_borrow(
      op_state: Rc<RefCell<OpState>>,
      _: (),
      _: (),
    ) -> Result<(), Error> {
      let n = {
        let op_state = op_state.borrow();
        let inner_state = op_state.borrow::<InnerState>();
        inner_state.0
      };
      // Future must be Poll::Pending on first call
      tokio::time::sleep(std::time::Duration::from_millis(1)).await;
      if n != 42 {
        unreachable!();
      }
      Ok(())
    }

    let extension = Extension::builder()
      .ops(vec![("op_async_borrow", op_async(op_async_borrow))])
      .state(|state| {
        state.put(InnerState(42));
        Ok(())
      })
      .build();

    let mut runtime = JsRuntime::new(RuntimeOptions {
      extensions: vec![extension],
      ..Default::default()
    });
    runtime
      .execute_script(
        "op_async_borrow.js",
        "Deno.core.opAsync('op_async_borrow')",
      )
      .unwrap();
runtime.run_event_loop(false).await.unwrap(); } #[tokio::test] async fn test_set_macrotask_callback_set_next_tick_callback() { async fn op_async_sleep( _op_state: Rc<RefCell<OpState>>, _: (), _: (), ) -> Result<(), Error> { // Future must be Poll::Pending on first call tokio::time::sleep(std::time::Duration::from_millis(1)).await; Ok(()) } let extension = Extension::builder() .ops(vec![("op_async_sleep", op_async(op_async_sleep))]) .build(); let mut runtime = JsRuntime::new(RuntimeOptions { extensions: vec![extension], ..Default::default() }); runtime .execute_script( "macrotasks_and_nextticks.js", r#" (async function () { const results = []; Deno.core.setMacrotaskCallback(() => { results.push("macrotask"); return true; }); Deno.core.setNextTickCallback(() => { results.push("nextTick"); Deno.core.setHasTickScheduled(false); }); Deno.core.setHasTickScheduled(true); await Deno.core.opAsync('op_async_sleep'); if (results[0] != "nextTick") { throw new Error(`expected nextTick, got: ${results[0]}`); } if (results[1] != "macrotask") { throw new Error(`expected macrotask, got: ${results[1]}`); } })(); "#, ) .unwrap(); runtime.run_event_loop(false).await.unwrap(); } #[tokio::test] async fn test_set_macrotask_callback_set_next_tick_callback_multiple() { let mut runtime = JsRuntime::new(Default::default()); runtime .execute_script( "multiple_macrotasks_and_nextticks.js", r#" Deno.core.setMacrotaskCallback(() => { return true; }); Deno.core.setMacrotaskCallback(() => { return true; }); Deno.core.setNextTickCallback(() => {}); Deno.core.setNextTickCallback(() => {}); "#, ) .unwrap(); let isolate = runtime.v8_isolate(); let state_rc = JsRuntime::state(isolate); let state = state_rc.borrow(); assert_eq!(state.js_macrotask_cbs.len(), 2); assert_eq!(state.js_nexttick_cbs.len(), 2); } #[tokio::test] async fn test_has_tick_scheduled() { run_in_task(|cx| { let macrotask = Arc::new(AtomicUsize::default()); let macrotask_ = Arc::clone(&macrotask); let next_tick = 
Arc::new(AtomicUsize::default()); let next_tick_ = Arc::clone(&next_tick); let op_macrotask = move |_: &mut OpState, _: (), _: ()| { macrotask_.fetch_add(1, Ordering::Relaxed); Ok(()) }; let op_next_tick = move |_: &mut OpState, _: (), _: ()| { next_tick_.fetch_add(1, Ordering::Relaxed); Ok(()) }; let extension = Extension::builder() .ops(vec![("op_macrotask", op_sync(op_macrotask))]) .ops(vec![("op_next_tick", op_sync(op_next_tick))]) .build(); let mut runtime = JsRuntime::new(RuntimeOptions { extensions: vec![extension], ..Default::default() }); runtime .execute_script( "has_tick_scheduled.js", r#" Deno.core.setMacrotaskCallback(() => { Deno.core.opSync("op_macrotask"); return true; // We're done. }); Deno.core.setNextTickCallback(() => Deno.core.opSync("op_next_tick")); Deno.core.setHasTickScheduled(true); "#, ) .unwrap(); assert!(matches!(runtime.poll_event_loop(cx, false), Poll::Pending)); assert_eq!(1, macrotask.load(Ordering::Relaxed)); assert_eq!(1, next_tick.load(Ordering::Relaxed)); assert!(matches!(runtime.poll_event_loop(cx, false), Poll::Pending)); assert!(matches!(runtime.poll_event_loop(cx, false), Poll::Pending)); assert!(matches!(runtime.poll_event_loop(cx, false), Poll::Pending)); let state_rc = JsRuntime::state(runtime.v8_isolate()); state_rc.borrow_mut().has_tick_scheduled = false; assert!(matches!( runtime.poll_event_loop(cx, false), Poll::Ready(Ok(())) )); assert!(matches!( runtime.poll_event_loop(cx, false), Poll::Ready(Ok(())) )); }); } #[test] fn terminate_during_module_eval() { #[derive(Default)] struct ModsLoader; impl ModuleLoader for ModsLoader { fn resolve( &self, specifier: &str, referrer: &str, _is_main: bool, ) -> Result<ModuleSpecifier, Error> { assert_eq!(specifier, "file:///main.js"); assert_eq!(referrer, "."); let s = crate::resolve_import(specifier, referrer).unwrap(); Ok(s) } fn load( &self, _module_specifier: &ModuleSpecifier, _maybe_referrer: Option<ModuleSpecifier>, _is_dyn_import: bool, ) -> Pin<Box<ModuleSourceFuture>> { 
async move { Ok(ModuleSource { code: "console.log('hello world');".to_string(), module_url_specified: "file:///main.js".to_string(), module_url_found: "file:///main.js".to_string(), }) } .boxed_local() } } let loader = std::rc::Rc::new(ModsLoader::default()); let mut runtime = JsRuntime::new(RuntimeOptions { module_loader: Some(loader), ..Default::default() }); let specifier = crate::resolve_url("file:///main.js").unwrap(); let source_code = "Deno.core.print('hello\\n')".to_string(); let module_id = futures::executor::block_on( runtime.load_main_module(&specifier, Some(source_code)), ) .unwrap(); runtime.v8_isolate().terminate_execution(); let mod_result = futures::executor::block_on(runtime.mod_evaluate(module_id)).unwrap(); assert!(mod_result .unwrap_err() .to_string() .contains("JavaScript execution has been terminated")); } #[tokio::test] async fn test_set_promise_reject_callback() { let promise_reject = Arc::new(AtomicUsize::default()); let promise_reject_ = Arc::clone(&promise_reject); let uncaught_exception = Arc::new(AtomicUsize::default()); let uncaught_exception_ = Arc::clone(&uncaught_exception); let op_promise_reject = move |_: &mut OpState, _: (), _: ()| { promise_reject_.fetch_add(1, Ordering::Relaxed); Ok(()) }; let op_uncaught_exception = move |_: &mut OpState, _: (), _: ()| { uncaught_exception_.fetch_add(1, Ordering::Relaxed); Ok(()) }; let extension = Extension::builder() .ops(vec![("op_promise_reject", op_sync(op_promise_reject))]) .ops(vec![( "op_uncaught_exception", op_sync(op_uncaught_exception), )]) .build(); let mut runtime = JsRuntime::new(RuntimeOptions { extensions: vec![extension], ..Default::default() }); runtime .execute_script( "promise_reject_callback.js", r#" // Note: |promise| is not the promise created below, it's a child. 
Deno.core.setPromiseRejectCallback((type, promise, reason) => { if (type !== /* PromiseRejectWithNoHandler */ 0) { throw Error("unexpected type: " + type); } if (reason.message !== "reject") { throw Error("unexpected reason: " + reason); } Deno.core.opSync("op_promise_reject"); throw Error("promiseReject"); // Triggers uncaughtException handler. }); Deno.core.setUncaughtExceptionCallback((err) => { if (err.message !== "promiseReject") throw err; Deno.core.opSync("op_uncaught_exception"); }); new Promise((_, reject) => reject(Error("reject"))); "#, ) .unwrap(); runtime.run_event_loop(false).await.unwrap(); assert_eq!(1, promise_reject.load(Ordering::Relaxed)); assert_eq!(1, uncaught_exception.load(Ordering::Relaxed)); runtime .execute_script( "promise_reject_callback.js", r#" { const prev = Deno.core.setPromiseRejectCallback((...args) => { prev(...args); }); } { const prev = Deno.core.setUncaughtExceptionCallback((...args) => { prev(...args); throw Error("fail"); }); } new Promise((_, reject) => reject(Error("reject"))); "#, ) .unwrap(); // Exception from uncaughtException handler doesn't bubble up but is // printed to stderr. runtime.run_event_loop(false).await.unwrap(); assert_eq!(2, promise_reject.load(Ordering::Relaxed)); assert_eq!(2, uncaught_exception.load(Ordering::Relaxed)); } }
31.934307
162
0.635474
ccba2fefea04d3bd05c2d694e77cd1ae1f9d85cf
2,623
use std::fs::{self, read_to_string}; use std::path::Path; #[derive(Debug)] pub struct InputCapability { name: String, raw_value: String, } #[derive(Debug)] pub struct InputDevice { name: String, file_name: String, capabilities: Vec<InputCapability>, properties: String, } #[derive(Debug)] pub struct EventDevice { file_name: String, device_name: String, } #[derive(Debug)] pub enum Device { Input(InputDevice), Event(EventDevice), } fn read_first_line_of_file(path: &str) -> Result<String, std::io::Error> { let cap_value = read_to_string(path)?; let mut cap_lines = cap_value.lines(); Ok(cap_lines.next().unwrap().to_string()) } pub fn input_devices() -> Result<Vec<Device>, std::io::Error> { let mut v = vec![]; for entry in fs::read_dir("/sys/class/input/")? { let entry = entry?; let name = entry.file_name().into_string().unwrap(); if name.starts_with("input") { let mut caps = vec![]; let caps_path = format!("/sys/class/input/{}/capabilities", name); if Path::new(&caps_path).exists() { for caps_entry in fs::read_dir(&caps_path)? { let caps_entry = caps_entry?; let cap_name = caps_entry.file_name().into_string().unwrap(); caps.push(InputCapability { name: cap_name.clone(), raw_value: read_first_line_of_file(&format!( "/sys/class/input/{}/capabilities/{}", name, cap_name ))?, }); } v.push(Device::Input(InputDevice { name: read_first_line_of_file(&format!("/sys/class/input/{}/name", name))?, file_name: entry.file_name().into_string().unwrap(), capabilities: caps, properties: read_first_line_of_file(&format!( "/sys/class/input/{}/properties", name ))?, })) } } else if name.starts_with("event") { let link = fs::read_link(&format!("/sys/class/input/{}/device", name))?; v.push(Device::Event(EventDevice { file_name: entry.file_name().into_string().unwrap(), device_name: link .file_name() .unwrap() .to_os_string() .into_string() .unwrap(), })) } } Ok(v) }
31.987805
95
0.507434
ab0a2c2f2dbc7bec89b88c0f1907ebf57f7c2c62
4,781
#[cfg(test)] #[path = "../../tests/unit/checker/relations_test.rs"] mod relations_test; use super::*; use std::collections::HashSet; /// Checks relation rules. pub fn check_relations(context: &CheckerContext) -> Result<(), String> { let reserved_ids = vec!["departure", "arrival", "break", "reload"].into_iter().collect::<HashSet<_>>(); (0_usize..) .zip(context.problem.plan.relations.as_ref().map_or(vec![].iter(), |relations| relations.iter())) .try_for_each(|(idx, relation)| { let tour = get_tour_by_vehicle_id(&relation.vehicle_id, relation.shift_index, &context.solution); // NOTE tour can be absent for tour relation let tour = if tour.is_err() { return match relation.type_field { RelationType::Any => Ok(()), _ => tour.map(|_| ()), }; } else { tour.unwrap() }; let activity_ids = get_activity_ids(&tour); let relation_ids = relation.jobs.iter().collect::<HashSet<_>>(); let expected_relation_count = relation_ids.iter().try_fold(0, |acc, job_id| { if let Some(job) = context.get_job_by_id(job_id) { Ok(acc + job.pickups.as_ref().map_or(0, |t| t.len()) + job.deliveries.as_ref().map_or(0, |t| t.len()) + job.replacements.as_ref().map_or(0, |t| t.len()) + job.services.as_ref().map_or(0, |t| t.len())) } else if reserved_ids.contains(job_id.as_str()) { Ok(acc + 1) } else { Err(format!("Relation has unknown job id: {}", job_id)) } })?; if expected_relation_count != relation.jobs.len() { return Err(format!("Relation {} contains duplicated ids: {:?}", idx, relation.jobs)); } match relation.type_field { RelationType::Strict => { let common = intersection(activity_ids.clone(), relation.jobs.clone()); if common != relation.jobs { Err(format!( "Relation {} does not follow strict rule: expected {:?}, got {:?}, common: {:?}", idx, relation.jobs, activity_ids, common )) } else { Ok(()) } } RelationType::Sequence => { let ids = activity_ids.iter().filter(|id| relation_ids.contains(id)).cloned().collect::<Vec<_>>(); if ids != relation.jobs { Err(format!( "Relation {} does not follow 
sequence rule: expected {:?}, got {:?}, common: {:?}", idx, relation.jobs, activity_ids, ids )) } else { Ok(()) } } RelationType::Any => { let has_wrong_assignment = context .solution .tours .iter() .filter(|other| tour.vehicle_id != other.vehicle_id) .any(|tour| get_activity_ids(tour).iter().any(|id| relation_ids.contains(id))); if has_wrong_assignment { Err(format!("Relation {} has jobs assigned to another tour", idx)) } else { Ok(()) } } } })?; Ok(()) } fn get_tour_by_vehicle_id(vehicle_id: &str, shift_index: Option<usize>, solution: &Solution) -> Result<Tour, String> { solution .tours .iter() .find(|tour| tour.vehicle_id == vehicle_id && tour.shift_index == shift_index.unwrap_or(0)) .cloned() .ok_or_else(|| format!("Cannot find tour for '{}'", vehicle_id)) } fn get_activity_ids(tour: &Tour) -> Vec<String> { tour.stops .iter() .flat_map(|stop| { // TODO consider job tags within multi jobs stop.activities.iter().map(|a| a.job_id.clone()) }) .collect() } fn intersection<T>(left: Vec<T>, right: Vec<T>) -> Vec<T> where T: PartialEq, { let mut common = Vec::new(); let mut right = right; for e1 in left.into_iter() { if let Some(pos) = right.iter().position(|e2| e1 == *e2) { common.push(e1); right.remove(pos); } else { if !common.is_empty() { break; } } } common }
36.776923
118
0.467475
e699f6f876ea88a365f628f335a644a3b11d4289
369
use std::os::raw; use super::ErrorCode; extern "C" { pub fn c_Logger_Close(); pub fn c_Logger_Open(language: raw::c_int, logDriverStation: bool); pub fn c_Logger_Log( code: ErrorCode, device: *const raw::c_char, func: *const raw::c_char, hierarchy: raw::c_int, stacktrace: *const raw::c_char, ) -> ErrorCode; }
23.0625
71
0.612466
018f6bf3e8b8b32d48571137876ba4948a9f0d28
3,145
#[doc = "Register `RA` reader"] pub struct R(crate::R<RA_SPEC>); impl core::ops::Deref for R { type Target = crate::R<RA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<RA_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<RA_SPEC>) -> Self { R(reader) } } #[doc = "Register `RA` writer"] pub struct W(crate::W<RA_SPEC>); impl core::ops::Deref for W { type Target = crate::W<RA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<RA_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<RA_SPEC>) -> Self { W(writer) } } #[doc = "Field `RA` reader - "] pub struct RA_R(crate::FieldReader<bool, bool>); impl RA_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { RA_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RA_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RA` writer - "] pub struct RA_W<'a> { w: &'a mut W, } impl<'a> RA_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl R { #[doc = "Bit 0"] #[inline(always)] pub fn ra(&self) -> RA_R { RA_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0"] #[inline(always)] pub fn ra(&mut self) -> RA_W { RA_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Description cluster\\[n\\]: Read access to peripheral region n detected\n\nThis register you can 
[`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ra](index.html) module"] pub struct RA_SPEC; impl crate::RegisterSpec for RA_SPEC { type Ux = u32; } #[doc = "`read()` method returns [ra::R](R) reader structure"] impl crate::Readable for RA_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [ra::W](W) writer structure"] impl crate::Writable for RA_SPEC { type Writer = W; } #[doc = "`reset()` method sets RA to value 0"] impl crate::Resettable for RA_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
27.587719
454
0.570747
d7d3a70f3c7c5d2ab5f941bd8f867e2f8f90ae4c
67,625
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Feature gating //! //! This module implements the gating necessary for preventing certain compiler //! features from being used by default. This module will crawl a pre-expanded //! AST to ensure that there are no features which are used that are not //! enabled. //! //! Features are enabled in programs via the crate-level attributes of //! `#![feature(...)]` with a comma-separated list of features. //! //! For the purpose of future feature-tracking, once code for detection of feature //! gate usage is added, *do not remove it again* even once the feature //! becomes stable. use self::AttributeType::*; use self::AttributeGate::*; use abi::Abi; use ast::{self, NodeId, PatKind, RangeEnd}; use attr; use codemap::Spanned; use syntax_pos::Span; use errors::{DiagnosticBuilder, Handler, FatalError}; use visit::{self, FnKind, Visitor}; use parse::ParseSess; use symbol::Symbol; use std::ascii::AsciiExt; use std::env; macro_rules! setter { ($field: ident) => {{ fn f(features: &mut Features) -> &mut bool { &mut features.$field } f as fn(&mut Features) -> &mut bool }} } macro_rules! declare_features { ($((active, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents active features that are currently being implemented or /// currently being considered for addition/removal. const ACTIVE_FEATURES: &'static [(&'static str, &'static str, Option<u32>, fn(&mut Features) -> &mut bool)] = &[ $((stringify!($feature), $ver, $issue, setter!($feature))),+ ]; /// A set of features to be used by later passes. 
pub struct Features { /// #![feature] attrs for stable language features, for error reporting pub declared_stable_lang_features: Vec<(Symbol, Span)>, /// #![feature] attrs for non-language (library) features pub declared_lib_features: Vec<(Symbol, Span)>, $(pub $feature: bool),+ } impl Features { pub fn new() -> Features { Features { declared_stable_lang_features: Vec::new(), declared_lib_features: Vec::new(), $($feature: false),+ } } } }; ($((removed, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents unstable features which have since been removed (it was once Active) const REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((stable_removed, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents stable features which have since been removed (it was once Accepted) const STABLE_REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((accepted, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Those language feature has since been Accepted (it was once Active) const ACCEPTED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; } } // If you change this, please modify src/doc/unstable-book as well. // // Don't ever remove anything from this list; set them to 'Removed'. // // The version numbers here correspond to the version in which the current status // was set. This is most important for knowing when a particular feature became // stable (active). // // NB: The featureck.py script parses this information directly out of the source // so take care when modifying it. declare_features! 
( (active, asm, "1.0.0", Some(29722)), (active, compile_error, "1.20.0", Some(40872)), (active, concat_idents, "1.0.0", Some(29599)), (active, link_args, "1.0.0", Some(29596)), (active, log_syntax, "1.0.0", Some(29598)), (active, non_ascii_idents, "1.0.0", Some(28979)), (active, plugin_registrar, "1.0.0", Some(29597)), (active, thread_local, "1.0.0", Some(29594)), (active, trace_macros, "1.0.0", Some(29598)), // rustc internal, for now: (active, intrinsics, "1.0.0", None), (active, lang_items, "1.0.0", None), (active, link_llvm_intrinsics, "1.0.0", Some(29602)), (active, linkage, "1.0.0", Some(29603)), (active, quote, "1.0.0", Some(29601)), (active, simd, "1.0.0", Some(27731)), // rustc internal (active, rustc_diagnostic_macros, "1.0.0", None), (active, advanced_slice_patterns, "1.0.0", Some(23121)), (active, box_syntax, "1.0.0", Some(27779)), (active, placement_in_syntax, "1.0.0", Some(27779)), (active, unboxed_closures, "1.0.0", Some(29625)), (active, allocator, "1.0.0", Some(27389)), (active, fundamental, "1.0.0", Some(29635)), (active, main, "1.0.0", Some(29634)), (active, needs_allocator, "1.4.0", Some(27389)), (active, on_unimplemented, "1.0.0", Some(29628)), (active, plugin, "1.0.0", Some(29597)), (active, simd_ffi, "1.0.0", Some(27731)), (active, start, "1.0.0", Some(29633)), (active, structural_match, "1.8.0", Some(31434)), (active, panic_runtime, "1.10.0", Some(32837)), (active, needs_panic_runtime, "1.10.0", Some(32837)), // OIBIT specific features (active, optin_builtin_traits, "1.0.0", Some(13231)), // macro reexport needs more discussion and stabilization (active, macro_reexport, "1.0.0", Some(29638)), // Allows use of #[staged_api] // rustc internal (active, staged_api, "1.0.0", None), // Allows using #![no_core] (active, no_core, "1.3.0", Some(29639)), // Allows using `box` in patterns; RFC 469 (active, box_patterns, "1.0.0", Some(29641)), // Allows using the unsafe_destructor_blind_to_params attribute; // RFC 1238 (active, dropck_parametricity, 
"1.3.0", Some(28498)), // Allows using the may_dangle attribute; RFC 1327 (active, dropck_eyepatch, "1.10.0", Some(34761)), // Allows the use of custom attributes; RFC 572 (active, custom_attribute, "1.0.0", Some(29642)), // Allows the use of #[derive(Anything)] as sugar for // #[derive_Anything]. (active, custom_derive, "1.0.0", Some(29644)), // Allows the use of rustc_* attributes; RFC 572 (active, rustc_attrs, "1.0.0", Some(29642)), // Allows the use of #[allow_internal_unstable]. This is an // attribute on macro_rules! and can't use the attribute handling // below (it has to be checked before expansion possibly makes // macros disappear). // // rustc internal (active, allow_internal_unstable, "1.0.0", None), // #23121. Array patterns have some hazards yet. (active, slice_patterns, "1.0.0", Some(23121)), // Allows the definition of associated constants in `trait` or `impl` // blocks. (active, associated_consts, "1.0.0", Some(29646)), // Allows the definition of `const fn` functions. (active, const_fn, "1.2.0", Some(24111)), // Allows indexing into constant arrays. (active, const_indexing, "1.4.0", Some(29947)), // Allows using #[prelude_import] on glob `use` items. // // rustc internal (active, prelude_import, "1.2.0", None), // Allows default type parameters to influence type inference. (active, default_type_parameter_fallback, "1.3.0", Some(27336)), // Allows associated type defaults (active, associated_type_defaults, "1.2.0", Some(29661)), // allow `repr(simd)`, and importing the various simd intrinsics (active, repr_simd, "1.4.0", Some(27731)), // Allows cfg(target_feature = "..."). (active, cfg_target_feature, "1.4.0", Some(29717)), // allow `extern "platform-intrinsic" { ... }` (active, platform_intrinsics, "1.4.0", Some(27731)), // allow `#[unwind]` // rust runtime internal (active, unwind_attributes, "1.4.0", None), // allow the use of `#[naked]` on functions. 
(active, naked_functions, "1.9.0", Some(32408)), // allow `#[no_debug]` (active, no_debug, "1.5.0", Some(29721)), // allow `#[omit_gdb_pretty_printer_section]` // rustc internal. (active, omit_gdb_pretty_printer_section, "1.5.0", None), // Allows cfg(target_vendor = "..."). (active, cfg_target_vendor, "1.5.0", Some(29718)), // Allow attributes on expressions and non-item statements (active, stmt_expr_attributes, "1.6.0", Some(15701)), // allow using type ascription in expressions (active, type_ascription, "1.6.0", Some(23416)), // Allows cfg(target_thread_local) (active, cfg_target_thread_local, "1.7.0", Some(29594)), // rustc internal (active, abi_vectorcall, "1.7.0", None), // a...b and ...b (active, inclusive_range_syntax, "1.7.0", Some(28237)), // X..Y patterns (active, exclusive_range_pattern, "1.11.0", Some(37854)), // impl specialization (RFC 1210) (active, specialization, "1.7.0", Some(31844)), // Allow Drop types in statics/const functions (RFC 1440) (active, drop_types_in_const, "1.9.0", Some(33156)), // Allows cfg(target_has_atomic = "..."). (active, cfg_target_has_atomic, "1.9.0", Some(32976)), // Allows `impl Trait` in function return types. (active, conservative_impl_trait, "1.12.0", Some(34511)), // The `!` type (active, never_type, "1.13.0", Some(35121)), // Allows all literals in attribute lists and values of key-value pairs. (active, attr_literals, "1.13.0", Some(34981)), // Allows the sysV64 ABI to be specified on all platforms // instead of just the platforms on which it is the C ABI (active, abi_sysv64, "1.13.0", Some(36167)), // Allows untagged unions `union U { ... 
}` (active, untagged_unions, "1.13.0", Some(32836)), // Used to identify the `compiler_builtins` crate // rustc internal (active, compiler_builtins, "1.13.0", None), // Allows attributes on lifetime/type formal parameters in generics (RFC 1327) (active, generic_param_attrs, "1.11.0", Some(34761)), // Allows #[link(..., cfg(..))] (active, link_cfg, "1.14.0", Some(37406)), (active, use_extern_macros, "1.15.0", Some(35896)), // Allows #[target_feature(...)] (active, target_feature, "1.15.0", None), // `extern "ptx-*" fn()` (active, abi_ptx, "1.15.0", None), // The `i128` type (active, i128_type, "1.16.0", Some(35118)), // The `unadjusted` ABI. Perma unstable. (active, abi_unadjusted, "1.16.0", None), // Procedural macros 2.0. (active, proc_macro, "1.16.0", Some(38356)), // Declarative macros 2.0 (`macro`). (active, decl_macro, "1.17.0", Some(39412)), // Allows #[link(kind="static-nobundle"...] (active, static_nobundle, "1.16.0", Some(37403)), // `extern "msp430-interrupt" fn()` (active, abi_msp430_interrupt, "1.16.0", Some(38487)), // Used to identify crates that contain sanitizer runtimes // rustc internal (active, sanitizer_runtime, "1.17.0", None), // Used to identify crates that contain the profiler runtime // rustc internal (active, profiler_runtime, "1.18.0", None), // `extern "x86-interrupt" fn()` (active, abi_x86_interrupt, "1.17.0", Some(40180)), // Allows the `catch {...}` expression (active, catch_expr, "1.17.0", Some(31436)), // Allows `repr(align(u16))` struct attribute (RFC 1358) (active, repr_align, "1.17.0", Some(33626)), // See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work. 
    (active, rvalue_static_promotion, "1.15.1", Some(38865)),

    // Used to preserve symbols (see llvm.used)
    (active, used, "1.18.0", Some(40289)),

    // Allows module-level inline assembly by way of global_asm!()
    (active, global_asm, "1.18.0", Some(35119)),

    // Allows overlapping impls of marker traits
    (active, overlapping_marker_traits, "1.18.0", Some(29864)),

    // Allows use of the :vis macro fragment specifier
    (active, macro_vis_matcher, "1.18.0", Some(41022)),

    // rustc internal
    (active, abi_thiscall, "1.19.0", None),
);

// Features that were proposed but removed without ever being stabilized.
// Naming one of these in `#![feature(..)]` is reported as error E0557 (see
// `get_features` below).
declare_features! (
    (removed, import_shadowing, "1.0.0", None),
    (removed, managed_boxes, "1.0.0", None),
    // Allows use of unary negate on unsigned integers, e.g. -e for e: u8
    (removed, negate_unsigned, "1.0.0", Some(29645)),
    (removed, reflect, "1.0.0", Some(27749)),
    // A way to temporarily opt out of opt in copy. This will *never* be accepted.
    (removed, opt_out_copy, "1.0.0", None),
    (removed, quad_precision_float, "1.0.0", None),
    (removed, struct_inherit, "1.0.0", None),
    (removed, test_removed_feature, "1.0.0", None),
    (removed, visible_private_types, "1.0.0", None),
    (removed, unsafe_no_drop_flag, "1.0.0", None),
    // Allows using items which are missing stability attributes
    // rustc internal
    (removed, unmarked_api, "1.0.0", None),
    (removed, pushpop_unsafe, "1.2.0", None),
);

// Features that once reached stable and were later removed from the language.
declare_features! (
    (stable_removed, no_stack_check, "1.0.0", None),
);

// Features that have been accepted (stabilized); declaring them in
// `#![feature(..)]` is still permitted and recorded as a stable lang feature.
declare_features! (
    (accepted, associated_types, "1.0.0", None),
    // allow overloading augmented assignment operations like `a += b`
    (accepted, augmented_assignments, "1.8.0", Some(28235)),
    // allow empty structs and enum variants with braces
    (accepted, braced_empty_structs, "1.8.0", Some(29720)),
    (accepted, default_type_params, "1.0.0", None),
    (accepted, globs, "1.0.0", None),
    (accepted, if_let, "1.0.0", None),
    // A temporary feature gate used to enable parser extensions needed
    // to bootstrap fix for #5723.
    (accepted, issue_5723_bootstrap, "1.0.0", None),
    (accepted, macro_rules, "1.0.0", None),
    // Allows using #![no_std]
    (accepted, no_std, "1.6.0", None),
    (accepted, slicing_syntax, "1.0.0", None),
    (accepted, struct_variant, "1.0.0", None),
    // These are used to test this portion of the compiler, they don't actually
    // mean anything
    (accepted, test_accepted_feature, "1.0.0", None),
    (accepted, tuple_indexing, "1.0.0", None),
    // Allows macros to appear in the type position.
    (accepted, type_macros, "1.13.0", Some(27245)),
    (accepted, while_let, "1.0.0", None),
    // Allows `#[deprecated]` attribute
    (accepted, deprecated, "1.9.0", Some(29935)),
    // `expr?`
    (accepted, question_mark, "1.13.0", Some(31436)),
    // Allows `..` in tuple (struct) patterns
    (accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627)),
    (accepted, item_like_imports, "1.15.0", Some(35120)),
    // Allows using `Self` and associated types in struct expressions and patterns.
    (accepted, more_struct_aliases, "1.16.0", Some(37544)),
    // elide `'static` lifetimes in `static`s and `const`s
    (accepted, static_in_const, "1.17.0", Some(35897)),
    // Allows field shorthands (`x` meaning `x: x`) in struct literal expressions.
    (accepted, field_init_shorthand, "1.17.0", Some(37340)),
    // Allows the definition recursive static items.
    (accepted, static_recursion, "1.17.0", Some(29719)),
    // pub(restricted) visibilities (RFC 1422)
    (accepted, pub_restricted, "1.18.0", Some(32409)),
    // The #![windows_subsystem] attribute
    (accepted, windows_subsystem, "1.18.0", Some(37499)),
    // Allows `break {expr}` with a value inside `loop`s.
    (accepted, loop_break_value, "1.19.0", Some(37339)),
    // Permits numeric fields in struct expressions and patterns.
    (accepted, relaxed_adts, "1.19.0", Some(35626)),
    // Coerces non capturing closures to function pointers
    (accepted, closure_to_fn_coercion, "1.19.0", Some(39817)),
    // Allows attributes on struct literal fields.
    (accepted, struct_field_attributes, "1.20.0", Some(38814)),
);

// If you change this, please modify src/doc/unstable-book as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.

/// Classifies where a builtin attribute may appear and whether the
/// unused_attribute lint should consider it consumed.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum AttributeType {
    /// Normal, builtin attribute that is consumed
    /// by the compiler before the unused_attribute check
    Normal,

    /// Builtin attribute that may not be consumed by the compiler
    /// before the unused_attribute check. These attributes
    /// will be ignored by the unused_attribute lint
    Whitelisted,

    /// Builtin attribute that is only allowed at the crate level
    CrateLevel,
}

pub enum AttributeGate {
    /// Is gated by a given feature gate, reason
    /// and function to check if enabled
    Gated(Stability, &'static str, &'static str, fn(&Features) -> bool),

    /// Ungated attribute, can be used on all release channels
    Ungated,
}

impl AttributeGate {
    // A gate is "deprecated" when its Stability carries a Deprecated payload;
    // `deprecated_attributes` below uses this to build the lint list.
    fn is_deprecated(&self) -> bool {
        match *self {
            Gated(Stability::Deprecated(_), ..) => true,
            _ => false,
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Stability {
    Unstable,
    // Argument is tracking issue link.
    Deprecated(&'static str),
}

// fn() is not Debug
impl ::std::fmt::Debug for AttributeGate {
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        match *self {
            Gated(ref stab, name, expl, _) =>
                write!(fmt, "Gated({:?}, {}, {})", stab, name, expl),
            Ungated => write!(fmt, "Ungated")
        }
    }
}

macro_rules!
cfg_fn {
    // Expands to a `fn(&Features) -> bool` that reads the named boolean
    // field out of `Features`; used as the gate-check in the tables below.
    ($field: ident) => {{
        fn f(features: &Features) -> bool {
            features.$field
        }
        f as fn(&Features) -> bool
    }}
}

/// Returns every builtin attribute whose gate has Deprecated stability, so
/// callers can lint on their use.
pub fn deprecated_attributes() -> Vec<&'static (&'static str, AttributeType, AttributeGate)> {
    BUILTIN_ATTRIBUTES.iter().filter(|a| a.2.is_deprecated()).collect()
}

/// Whether `attr` names any attribute known to rustc or rustdoc.
pub fn is_builtin_attr(attr: &ast::Attribute) -> bool {
    BUILTIN_ATTRIBUTES.iter().any(|&(builtin_name, _, _)| attr.check_name(builtin_name))
}

// Attributes that have a special meaning to rustc or rustdoc
pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGate)] = &[
    // Normal attributes
    ("warn", Normal, Ungated),
    ("allow", Normal, Ungated),
    ("forbid", Normal, Ungated),
    ("deny", Normal, Ungated),

    ("macro_reexport", Normal, Ungated),
    ("macro_use", Normal, Ungated),
    ("macro_export", Normal, Ungated),
    ("plugin_registrar", Normal, Ungated),

    ("cfg", Normal, Ungated),
    ("cfg_attr", Normal, Ungated),
    ("main", Normal, Ungated),
    ("start", Normal, Ungated),
    ("test", Normal, Ungated),
    ("bench", Normal, Ungated),
    ("simd", Normal, Ungated),
    ("repr", Normal, Ungated),
    ("path", Normal, Ungated),
    ("abi", Normal, Ungated),
    ("automatically_derived", Normal, Ungated),
    ("no_mangle", Normal, Ungated),
    ("no_link", Normal, Ungated),
    ("derive", Normal, Ungated),
    ("should_panic", Normal, Ungated),
    ("ignore", Normal, Ungated),
    ("no_implicit_prelude", Normal, Ungated),
    ("reexport_test_harness_main", Normal, Ungated),
    ("link_args", Normal, Ungated),
    ("macro_escape", Normal, Ungated),

    // RFC #1445.
    ("structural_match", Whitelisted, Gated(Stability::Unstable,
                                            "structural_match",
                                            "the semantics of constant patterns is \
                                             not yet settled",
                                            cfg_fn!(structural_match))),

    ("plugin", CrateLevel, Gated(Stability::Unstable,
                                 "plugin",
                                 "compiler plugins are experimental \
                                  and possibly buggy",
                                 cfg_fn!(plugin))),

    ("no_std", CrateLevel, Ungated),
    ("no_core", CrateLevel, Gated(Stability::Unstable,
                                  "no_core",
                                  "no_core is experimental",
                                  cfg_fn!(no_core))),
    ("lang", Normal, Gated(Stability::Unstable,
                           "lang_items",
                           "language items are subject to change",
                           cfg_fn!(lang_items))),
    ("linkage", Whitelisted, Gated(Stability::Unstable,
                                   "linkage",
                                   "the `linkage` attribute is experimental \
                                    and not portable across platforms",
                                   cfg_fn!(linkage))),
    ("thread_local", Whitelisted, Gated(Stability::Unstable,
                                        "thread_local",
                                        "`#[thread_local]` is an experimental feature, and does \
                                         not currently handle destructors. There is no \
                                         corresponding `#[task_local]` mapping to the task \
                                         model",
                                        cfg_fn!(thread_local))),
    ("rustc_on_unimplemented", Normal, Gated(Stability::Unstable,
                                             "on_unimplemented",
                                             "the `#[rustc_on_unimplemented]` attribute \
                                              is an experimental feature",
                                             cfg_fn!(on_unimplemented))),
    ("allocator", Whitelisted, Gated(Stability::Unstable,
                                     "allocator",
                                     "the `#[allocator]` attribute is an experimental feature",
                                     cfg_fn!(allocator))),
    ("needs_allocator", Normal, Gated(Stability::Unstable,
                                      "needs_allocator",
                                      "the `#[needs_allocator]` \
                                       attribute is an experimental \
                                       feature",
                                      cfg_fn!(needs_allocator))),
    ("panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                         "panic_runtime",
                                         "the `#[panic_runtime]` attribute is \
                                          an experimental feature",
                                         cfg_fn!(panic_runtime))),
    ("needs_panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                               "needs_panic_runtime",
                                               "the `#[needs_panic_runtime]` \
                                                attribute is an experimental \
                                                feature",
                                               cfg_fn!(needs_panic_runtime))),
    ("rustc_variance", Normal, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_variance]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_error", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_error]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_if_this_changed", Whitelisted, Gated(Stability::Unstable,
                                                 "rustc_attrs",
                                                 "the `#[rustc_if_this_changed]` attribute \
                                                  is just used for rustc unit tests \
                                                  and will never be stable",
                                                 cfg_fn!(rustc_attrs))),
    ("rustc_then_this_would_need", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "the `#[rustc_if_this_changed]` attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_dirty", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_dirty]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_clean", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_clean]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_metadata_dirty", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_dirty]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_metadata_clean", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_clean]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_partition_reused", Whitelisted, Gated(Stability::Unstable,
                                                  "rustc_attrs",
                                                  "this attribute \
                                                   is just used for rustc unit tests \
                                                   and will never be stable",
                                                  cfg_fn!(rustc_attrs))),
    ("rustc_partition_translated", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "this attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_symbol_name", Whitelisted, Gated(Stability::Unstable,
                                             "rustc_attrs",
                                             "internal rustc attributes will never be stable",
                                             cfg_fn!(rustc_attrs))),
    ("rustc_item_path", Whitelisted, Gated(Stability::Unstable,
                                           "rustc_attrs",
                                           "internal rustc attributes will never be stable",
                                           cfg_fn!(rustc_attrs))),
    ("rustc_mir", Whitelisted, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_mir]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_inherit_overflow_checks", Whitelisted, Gated(Stability::Unstable,
                                                         "rustc_attrs",
                                                         "the `#[rustc_inherit_overflow_checks]` \
                                                          attribute is just used to control \
                                                          overflow checking behavior of several \
                                                          libcore functions that are inlined \
                                                          across crates and will never be stable",
                                                          cfg_fn!(rustc_attrs))),
    ("compiler_builtins", Whitelisted, Gated(Stability::Unstable,
                                             "compiler_builtins",
                                             "the `#[compiler_builtins]` attribute is used to \
                                              identify the `compiler_builtins` crate which \
                                              contains compiler-rt intrinsics and will never be \
                                              stable",
                                             cfg_fn!(compiler_builtins))),
    ("sanitizer_runtime", Whitelisted, Gated(Stability::Unstable,
                                             "sanitizer_runtime",
                                             "the `#[sanitizer_runtime]` attribute is used to \
                                              identify crates that contain the runtime of a \
                                              sanitizer and will never be stable",
                                             cfg_fn!(sanitizer_runtime))),
    ("profiler_runtime", Whitelisted, Gated(Stability::Unstable,
                                            "profiler_runtime",
                                            "the `#[profiler_runtime]` attribute is used to \
                                             identify the `profiler_builtins` crate which \
                                             contains the profiler runtime and will never be \
                                             stable",
                                            cfg_fn!(profiler_runtime))),

    ("allow_internal_unstable", Normal, Gated(Stability::Unstable,
                                              "allow_internal_unstable",
                                              EXPLAIN_ALLOW_INTERNAL_UNSTABLE,
                                              cfg_fn!(allow_internal_unstable))),

    ("fundamental", Whitelisted, Gated(Stability::Unstable,
                                       "fundamental",
                                       "the `#[fundamental]` attribute \
                                        is an experimental feature",
                                       cfg_fn!(fundamental))),

    ("proc_macro_derive", Normal, Ungated),

    ("rustc_copy_clone_marker", Whitelisted, Gated(Stability::Unstable,
                                                   "rustc_attrs",
                                                   "internal implementation detail",
                                                   cfg_fn!(rustc_attrs))),

    // FIXME: #14408 whitelist docs since rustdoc looks at them
    ("doc", Whitelisted, Ungated),

    // FIXME: #14406 these are processed in trans, which happens after the
    // lint pass
    ("cold", Whitelisted, Ungated),
    ("naked", Whitelisted, Gated(Stability::Unstable,
                                 "naked_functions",
                                 "the `#[naked]` attribute \
                                  is an experimental feature",
                                 cfg_fn!(naked_functions))),
    ("target_feature", Whitelisted, Gated(
        Stability::Unstable, "target_feature",
        "the `#[target_feature]` attribute is an experimental feature",
        cfg_fn!(target_feature))),
    ("export_name", Whitelisted, Ungated),
    ("inline", Whitelisted, Ungated),
    ("link", Whitelisted, Ungated),
    ("link_name", Whitelisted, Ungated),
    ("link_section", Whitelisted, Ungated),
    ("no_builtins", Whitelisted, Ungated),
    // NOTE(review): "no_mangle" also appears above under Normal attributes --
    // duplicate table entry kept as-is; only the first match is used by
    // `check_attribute`.
    ("no_mangle", Whitelisted, Ungated),
    ("no_debug", Whitelisted, Gated(
        Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721"),
        "no_debug",
        "the `#[no_debug]` attribute is an experimental feature",
        cfg_fn!(no_debug))),
    ("omit_gdb_pretty_printer_section", Whitelisted, Gated(Stability::Unstable,
                                                       "omit_gdb_pretty_printer_section",
                                                       "the `#[omit_gdb_pretty_printer_section]` \
                                                        attribute is just used for the Rust test \
                                                        suite",
                                                       cfg_fn!(omit_gdb_pretty_printer_section))),
    ("unsafe_destructor_blind_to_params",
     Normal,
     Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/34761"),
           "dropck_parametricity",
           "unsafe_destructor_blind_to_params has been replaced by \
            may_dangle and will be removed in the future",
           cfg_fn!(dropck_parametricity))),
    ("may_dangle",
     Normal,
     Gated(Stability::Unstable,
           "dropck_eyepatch",
           "may_dangle has unstable semantics and may be removed in the future",
           cfg_fn!(dropck_eyepatch))),
    ("unwind", Whitelisted, Gated(Stability::Unstable,
                                  "unwind_attributes",
                                  "#[unwind] is experimental",
                                  cfg_fn!(unwind_attributes))),
    ("used", Whitelisted, Gated(
        Stability::Unstable, "used",
        "the `#[used]` attribute is an experimental feature",
        cfg_fn!(used))),

    // used in resolve
    ("prelude_import", Whitelisted, Gated(Stability::Unstable,
                                          "prelude_import",
                                          "`#[prelude_import]` is for use by rustc only",
                                          cfg_fn!(prelude_import))),

    // FIXME: #14407 these are only looked at on-demand so we can't
    // guarantee they'll have already been checked
    ("rustc_deprecated", Whitelisted, Ungated),
    ("must_use", Whitelisted, Ungated),
    ("stable", Whitelisted, Ungated),
    ("unstable", Whitelisted, Ungated),
    ("deprecated", Normal, Ungated),

    ("rustc_paren_sugar", Normal, Gated(Stability::Unstable,
                                        "unboxed_closures",
                                        "unboxed_closures are still evolving",
                                        cfg_fn!(unboxed_closures))),

    ("windows_subsystem", Whitelisted, Ungated),

    ("proc_macro_attribute", Normal, Gated(Stability::Unstable,
                                           "proc_macro",
                                           "attribute proc macros are currently unstable",
                                           cfg_fn!(proc_macro))),

    ("proc_macro", Normal, Gated(Stability::Unstable,
                                 "proc_macro",
                                 "function-like proc macros are currently unstable",
                                 cfg_fn!(proc_macro))),

    ("rustc_derive_registrar", Normal, Gated(Stability::Unstable,
                                             "rustc_derive_registrar",
                                             "used internally by rustc",
                                             cfg_fn!(rustc_attrs))),

    // Crate level attributes
    ("crate_name", CrateLevel, Ungated),
    ("crate_type", CrateLevel, Ungated),
    ("crate_id", CrateLevel, Ungated),
    ("feature", CrateLevel, Ungated),
    ("no_start", CrateLevel, Ungated),
    ("no_main", CrateLevel, Ungated),
    ("no_builtins", CrateLevel, Ungated),
    ("recursion_limit", CrateLevel, Ungated),
    ("type_length_limit", CrateLevel, Ungated),
];

// cfg(...)'s that are feature gated
const GATED_CFGS: &[(&str, &str, fn(&Features) -> bool)] = &[
    // (name in cfg, feature, function to check if the feature is enabled)
    ("target_feature", "cfg_target_feature", cfg_fn!(cfg_target_feature)),
    ("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)),
    ("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)),
    ("target_has_atomic", "cfg_target_has_atomic", cfg_fn!(cfg_target_has_atomic)),
];

/// A recorded use of a gated `cfg(..)` predicate: the span where it appeared
/// and the index of the matching `GATED_CFGS` entry.
#[derive(Debug, Eq, PartialEq)]
pub struct GatedCfg {
    span: Span,
    index: usize,
}

impl GatedCfg {
    /// If `cfg` names a gated predicate, remember its span and table index so
    /// the gate can be checked (and an error emitted) later via
    /// `check_and_emit`.
    pub fn gate(cfg: &ast::MetaItem) -> Option<GatedCfg> {
        let name = cfg.name().as_str();
        GATED_CFGS.iter()
                  .position(|info| info.0 == name)
                  .map(|idx| {
                      GatedCfg {
                          span: cfg.span,
                          index: idx
                      }
                  })
    }

    /// Emit an error unless the matched cfg's feature is enabled or the span
    /// allows unstable items.
    pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) {
        let (cfg, feature, has_feature) = GATED_CFGS[self.index];
        if !has_feature(features) && !self.span.allows_unstable() {
            let explain = format!("`cfg({})` is experimental and subject to change", cfg);
            emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain);
        }
    }
}

// Shared state threaded through the feature-gate checks below.
struct Context<'a> {
    features: &'a Features,
    parse_sess: &'a ParseSess,
    plugin_attributes: &'a [(String, AttributeType)],
}

// Core gate check: emits a feature error at `span` unless `has_feature`
// reports the feature as enabled or the span allows unstable items.
macro_rules! gate_feature_fn {
    ($cx: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
        let (cx, has_feature, span, name, explain) = ($cx, $has_feature, $span, $name, $explain);
        let has_feature: bool = has_feature(&$cx.features);
        debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
        if !has_feature && !span.allows_unstable() {
            emit_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain);
        }
    }}
}

// Convenience wrapper over gate_feature_fn! keyed by a `Features` field name.
macro_rules! gate_feature {
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {
        gate_feature_fn!($cx, |x:&Features| x.$feature, $span, stringify!($feature), $explain)
    }
}

impl<'a> Context<'a> {
    // Gate a single attribute: builtin attributes are checked against their
    // declared gate; plugin-registered attributes are always allowed; the
    // `rustc_`/`derive_` prefixes and unknown attributes are gated separately.
    fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) {
        debug!("check_attribute(attr = {:?})", attr);
        let name = unwrap_or!(attr.name(), return).as_str();
        for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES {
            if name == n {
                if let Gated(_, name, desc, ref has_feature) = *gateage {
                    gate_feature_fn!(self, has_feature, attr.span, name, desc);
                }
                debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage);
                return;
            }
        }
        for &(ref n, ref ty) in self.plugin_attributes {
            if attr.path == &**n {
                // Plugins can't gate attributes, so we don't check for it
                // unlike the code above; we only use this loop to
                // short-circuit to avoid the checks below
                debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty);
                return;
            }
        }
        if name.starts_with("rustc_") {
            gate_feature!(self, rustc_attrs, attr.span,
                          "unless otherwise specified, attributes \
                           with the prefix `rustc_` \
                           are reserved for internal compiler diagnostics");
        } else if name.starts_with("derive_") {
            gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE);
        } else if !attr::is_known(attr) {
            // Only run the custom attribute lint during regular
            // feature gate checking. Macro gating runs
            // before the plugin attributes are registered
            // so we skip this then
            if !is_macro {
                gate_feature!(self, custom_attribute, attr.span,
                              &format!("The attribute `{}` is currently \
                                        unknown to the compiler and \
                                        may have meaning \
                                        added to it in the future",
                                       attr.path));
            }
        }
    }
}

/// Gate a single attribute outside of crate-walking (e.g. from macro
/// expansion); `is_macro` is always true here, so unknown-attribute
/// gating is skipped.
pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess, features: &Features) {
    let cx = Context { features: features, parse_sess: parse_sess, plugin_attributes: &[] };
    cx.check_attribute(attr, true);
}

/// If the language feature was stabilized, returns the version in which it
/// became stable.
pub fn find_lang_feature_accepted_version(feature: &str) -> Option<&'static str> {
    ACCEPTED_FEATURES.iter().find(|t| t.0 == feature).map(|t| t.1)
}

// Look up the tracking issue for a language feature, searching every
// declare_features! table; panics if the feature is not declared at all.
fn find_lang_feature_issue(feature: &str) -> Option<u32> {
    if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.0 == feature) {
        let issue = info.2;
        // FIXME (#28244): enforce that active features have issue numbers
        // assert!(issue.is_some())
        issue
    } else {
        // search in Accepted, Removed, or Stable Removed features
        let found = ACCEPTED_FEATURES.iter().chain(REMOVED_FEATURES).chain(STABLE_REMOVED_FEATURES)
            .find(|t| t.0 == feature);
        match found {
            Some(&(_, _, issue)) => issue,
            None => panic!("Feature `{}` is not declared anywhere", feature),
        }
    }
}

pub enum GateIssue {
    Language,
    Library(Option<u32>)
}

/// Build and immediately emit a feature-gate error.
pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue,
                        explain: &str) {
    feature_err(sess, feature, span, issue, explain).emit();
}

/// Build (but do not emit) a feature-gate diagnostic, attaching the tracking
/// issue number and, on nightly, a `#![feature(..)]` suggestion.
pub fn feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue,
                       explain: &str) -> DiagnosticBuilder<'a> {
    let diag = &sess.span_diagnostic;

    let issue = match issue {
        GateIssue::Language => find_lang_feature_issue(feature),
        GateIssue::Library(lib) => lib,
    };

    let mut err = if let Some(n) = issue {
        diag.struct_span_err(span, &format!("{} (see issue #{})", explain, n))
    } else {
        diag.struct_span_err(span, explain)
    };

    // #23973: do not suggest `#![feature(...)]` if we are in beta/stable
    if sess.unstable_features.is_nightly_build() {
        err.help(&format!("add #![feature({})] to the \
                           crate attributes to enable",
                          feature));
    }

    err
}

const EXPLAIN_BOX_SYNTAX: &'static str =
    "box expression syntax is experimental; you can call `Box::new` instead.";

pub const EXPLAIN_STMT_ATTR_SYNTAX: &'static str =
    "attributes on non-item statements and expressions are experimental.";

pub const EXPLAIN_ASM: &'static str =
    "inline assembly is not stable enough for use and is subject to change";

pub const EXPLAIN_GLOBAL_ASM: &'static str =
    "`global_asm!` is not stable enough for use and is subject to change";

pub const EXPLAIN_LOG_SYNTAX: &'static str =
    "`log_syntax!` is not stable enough for use and is subject to change";

pub const EXPLAIN_CONCAT_IDENTS: &'static str =
    "`concat_idents` is not stable enough for use and is subject to change";

pub const EXPLAIN_COMPILE_ERROR: &'static str =
    "`compile_error` is not stable enough for use and is subject to change";

pub const EXPLAIN_TRACE_MACROS: &'static str =
    "`trace_macros` is not stable enough for use and is subject to change";

pub const EXPLAIN_ALLOW_INTERNAL_UNSTABLE: &'static str =
    "allow_internal_unstable side-steps feature gating and stability checks";

pub const EXPLAIN_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future.";

pub const EXPLAIN_DEPR_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future. \
     Prefer using procedural macro custom derive.";

pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
    "attributes of the form `#[derive_*]` are reserved for the compiler";

pub const EXPLAIN_VIS_MATCHER: &'static str =
    ":vis fragment specifier is experimental and subject to change";

pub const EXPLAIN_PLACEMENT_IN: &'static str =
    "placement-in expression syntax is experimental and subject to change.";

// AST visitor that walks the expanded crate and reports any use of a gated
// feature that is not enabled.
struct PostExpansionVisitor<'a> {
    context: &'a Context<'a>,
}

macro_rules!
gate_feature_post {
    // Like gate_feature!, but skips the check entirely for spans produced by
    // macros that are allowed to use unstable features.
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {{
        let (cx, span) = ($cx, $span);
        if !span.allows_unstable() {
            gate_feature!(cx.context, $feature, span, $explain)
        }
    }}
}

impl<'a> PostExpansionVisitor<'a> {
    // Gate unstable ABI strings; the stable ABIs fall through unchecked.
    fn check_abi(&self, abi: Abi, span: Span) {
        match abi {
            Abi::RustIntrinsic => {
                gate_feature_post!(&self, intrinsics, span,
                                   "intrinsics are subject to change");
            },
            Abi::PlatformIntrinsic => {
                gate_feature_post!(&self, platform_intrinsics, span,
                                   "platform intrinsics are experimental and possibly buggy");
            },
            Abi::Vectorcall => {
                gate_feature_post!(&self, abi_vectorcall, span,
                                   "vectorcall is experimental and subject to change");
            },
            Abi::Thiscall => {
                gate_feature_post!(&self, abi_thiscall, span,
                                   "thiscall is experimental and subject to change");
            },
            Abi::RustCall => {
                gate_feature_post!(&self, unboxed_closures, span,
                                   "rust-call ABI is subject to change");
            },
            Abi::SysV64 => {
                gate_feature_post!(&self, abi_sysv64, span,
                                   "sysv64 ABI is experimental and subject to change");
            },
            Abi::PtxKernel => {
                gate_feature_post!(&self, abi_ptx, span,
                                   "PTX ABIs are experimental and subject to change");
            },
            Abi::Unadjusted => {
                gate_feature_post!(&self, abi_unadjusted, span,
                                   "unadjusted ABI is an implementation detail and perma-unstable");
            },
            Abi::Msp430Interrupt => {
                gate_feature_post!(&self, abi_msp430_interrupt, span,
                                   "msp430-interrupt ABI is experimental and subject to change");
            },
            Abi::X86Interrupt => {
                gate_feature_post!(&self, abi_x86_interrupt, span,
                                   "x86-interrupt ABI is experimental and subject to change");
            },
            // Stable
            Abi::Cdecl |
            Abi::Stdcall |
            Abi::Fastcall |
            Abi::Aapcs |
            Abi::Win64 |
            Abi::Rust |
            Abi::C |
            Abi::System => {}
        }
    }
}

// True if the meta item contains any literal that is gated behind the
// attr_literals feature: a non-string value, or any literal nested in a list.
fn contains_novel_literal(item: &ast::MetaItem) -> bool {
    use ast::MetaItemKind::*;
    use ast::NestedMetaItemKind::*;

    match item.node {
        Word => false,
        NameValue(ref lit) => !lit.node.is_str(),
        List(ref list) => list.iter().any(|li| {
            match li.node {
                MetaItem(ref mi) => contains_novel_literal(mi),
                Literal(_) => true,
            }
        }),
    }
}

impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
    fn visit_attribute(&mut self, attr: &ast::Attribute) {
        if !attr.span.allows_unstable() {
            // check for gated attributes
            self.context.check_attribute(attr, false);
        }

        if self.context.features.proc_macro && attr::is_known(attr) {
            return
        }

        let meta = panictry!(attr.parse_meta(self.context.parse_sess));
        if contains_novel_literal(&meta) {
            gate_feature_post!(&self, attr_literals, attr.span,
                               "non-string literals in attributes, or string \
                                literals in top-level positions, are experimental");
        }
    }

    fn visit_name(&mut self, sp: Span, name: ast::Name) {
        if !name.as_str().is_ascii() {
            gate_feature_post!(&self, non_ascii_idents, sp,
                               "non-ascii idents are not fully supported.");
        }
    }

    fn visit_item(&mut self, i: &'a ast::Item) {
        match i.node {
            ast::ItemKind::ExternCrate(_) => {
                if attr::contains_name(&i.attrs[..], "macro_reexport") {
                    gate_feature_post!(&self, macro_reexport, i.span,
                                       "macros reexports are experimental \
                                        and possibly buggy");
                }
            }

            ast::ItemKind::ForeignMod(ref foreign_module) => {
                if attr::contains_name(&i.attrs[..], "link_args") {
                    gate_feature_post!(&self, link_args, i.span,
                                      "the `link_args` attribute is not portable \
                                       across platforms, it is recommended to \
                                       use `#[link(name = \"foo\")]` instead")
                }
                self.check_abi(foreign_module.abi, i.span);
            }

            ast::ItemKind::Fn(..) => {
                if attr::contains_name(&i.attrs[..], "plugin_registrar") {
                    gate_feature_post!(&self, plugin_registrar, i.span,
                                       "compiler plugins are experimental and possibly buggy");
                }
                if attr::contains_name(&i.attrs[..], "start") {
                    gate_feature_post!(&self, start, i.span,
                                      "a #[start] function is an experimental \
                                       feature whose signature may change \
                                       over time");
                }
                if attr::contains_name(&i.attrs[..], "main") {
                    gate_feature_post!(&self, main, i.span,
                                       "declaration of a nonstandard #[main] \
                                        function may change over time, for now \
                                        a top-level `fn main()` is required");
                }
            }

            ast::ItemKind::Struct(..) => {
                if attr::contains_name(&i.attrs[..], "simd") {
                    gate_feature_post!(&self, simd, i.span,
                                       "SIMD types are experimental and possibly buggy");
                    self.context.parse_sess.span_diagnostic.span_warn(i.span,
                                                                      "the `#[simd]` attribute \
                                                                       is deprecated, use \
                                                                       `#[repr(simd)]` instead");
                }
                for attr in &i.attrs {
                    if attr.path == "repr" {
                        for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
                            if item.check_name("simd") {
                                gate_feature_post!(&self, repr_simd, i.span,
                                                   "SIMD types are experimental \
                                                    and possibly buggy");
                            }
                            if item.check_name("align") {
                                gate_feature_post!(&self, repr_align, i.span,
                                                   "the struct `#[repr(align(u16))]` attribute \
                                                    is experimental");
                            }
                        }
                    }
                }
            }

            ast::ItemKind::DefaultImpl(..) => {
                gate_feature_post!(&self, optin_builtin_traits,
                                   i.span,
                                   "default trait implementations are experimental \
                                    and possibly buggy");
            }

            ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, _) => {
                if polarity == ast::ImplPolarity::Negative {
                    gate_feature_post!(&self, optin_builtin_traits,
                                       i.span,
                                       "negative trait bounds are not yet fully implemented; \
                                        use marker types for now");
                }

                if let ast::Defaultness::Default = defaultness {
                    gate_feature_post!(&self, specialization,
                                       i.span,
                                       "specialization is unstable");
                }
            }

            ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => {
                let msg = "`macro` is experimental";
                gate_feature_post!(&self, decl_macro, i.span, msg);
            }

            _ => {}
        }

        visit::walk_item(self, i);
    }

    fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
        let links_to_llvm = match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
            Some(val) => val.as_str().starts_with("llvm."),
            _ => false
        };
        if links_to_llvm {
            gate_feature_post!(&self, link_llvm_intrinsics, i.span,
                               "linking to LLVM intrinsics is experimental");
        }

        visit::walk_foreign_item(self, i)
    }

    fn visit_ty(&mut self, ty: &'a ast::Ty) {
        match ty.node {
            ast::TyKind::BareFn(ref bare_fn_ty) => {
                self.check_abi(bare_fn_ty.abi, ty.span);
            }
            ast::TyKind::ImplTrait(..) => {
                gate_feature_post!(&self, conservative_impl_trait, ty.span,
                                   "`impl Trait` is experimental");
            }
            ast::TyKind::Never => {
                gate_feature_post!(&self, never_type, ty.span,
                                   "The `!` type is experimental");
            },
            _ => {}
        }
        visit::walk_ty(self, ty)
    }

    fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) {
        // `-> !` in return position is allowed without the never_type gate,
        // so only recurse into non-`!` return types.
        if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty {
            if output_ty.node != ast::TyKind::Never {
                self.visit_ty(output_ty)
            }
        }
    }

    fn visit_expr(&mut self, e: &'a ast::Expr) {
        match e.node {
            ast::ExprKind::Box(_) => {
                gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX);
            }
            ast::ExprKind::Type(..) => {
                gate_feature_post!(&self, type_ascription, e.span,
                                  "type ascription is experimental");
            }
            ast::ExprKind::Range(_, _, ast::RangeLimits::Closed) => {
                gate_feature_post!(&self, inclusive_range_syntax,
                                  e.span,
                                  "inclusive range syntax is experimental");
            }
            ast::ExprKind::InPlace(..) => {
                gate_feature_post!(&self, placement_in_syntax, e.span, EXPLAIN_PLACEMENT_IN);
            }
            ast::ExprKind::Lit(ref lit) => {
                if let ast::LitKind::Int(_, ref ty) = lit.node {
                    match *ty {
                        ast::LitIntType::Signed(ast::IntTy::I128) |
                        ast::LitIntType::Unsigned(ast::UintTy::U128) => {
                            gate_feature_post!(&self, i128_type, e.span,
                                               "128-bit integers are not stable");
                        }
                        _ => {}
                    }
                }
            }
            ast::ExprKind::Catch(_) => {
                gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental");
            }
            _ => {}
        }
        visit::walk_expr(self, e);
    }

    fn visit_pat(&mut self, pattern: &'a ast::Pat) {
        match pattern.node {
            PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
                gate_feature_post!(&self, advanced_slice_patterns,
                                  pattern.span,
                                  "multiple-element slice matches anywhere \
                                   but at the end of a slice (e.g. \
                                   `[0, ..xs, 0]`) are experimental")
            }
            PatKind::Slice(..) => {
                gate_feature_post!(&self, slice_patterns,
                                   pattern.span,
                                   "slice pattern syntax is experimental");
            }
            PatKind::Box(..) => {
                gate_feature_post!(&self, box_patterns,
                                  pattern.span,
                                  "box pattern syntax is experimental");
            }
            PatKind::Range(_, _, RangeEnd::Excluded) => {
                gate_feature_post!(&self, exclusive_range_pattern, pattern.span,
                                   "exclusive range pattern syntax is experimental");
            }
            _ => {}
        }
        visit::walk_pat(self, pattern)
    }

    fn visit_fn(&mut self,
                fn_kind: FnKind<'a>,
                fn_decl: &'a ast::FnDecl,
                span: Span,
                _node_id: NodeId) {
        // check for const fn declarations
        if let FnKind::ItemFn(_, _, _, Spanned { node: ast::Constness::Const, .. }, _, _, _) =
            fn_kind {
            gate_feature_post!(&self, const_fn, span, "const fn is unstable");
        }
        // stability of const fn methods are covered in
        // visit_trait_item and visit_impl_item below; this is
        // because default methods don't pass through this
        // point.

        match fn_kind {
            FnKind::ItemFn(_, _, _, _, abi, _, _) |
            FnKind::Method(_, &ast::MethodSig { abi, .. }, _, _) => {
                self.check_abi(abi, span);
            }
            _ => {}
        }
        visit::walk_fn(self, fn_kind, fn_decl, span);
    }

    fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) {
        match ti.node {
            ast::TraitItemKind::Const(..) => {
                gate_feature_post!(&self, associated_consts,
                                  ti.span,
                                  "associated constants are experimental")
            }
            ast::TraitItemKind::Method(ref sig, ref block) => {
                // Only gate the ABI for required methods; default methods are
                // covered by visit_fn via their bodies.
                if block.is_none() {
                    self.check_abi(sig.abi, ti.span);
                }
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable");
                }
            }
            ast::TraitItemKind::Type(_, Some(_)) => {
                gate_feature_post!(&self, associated_type_defaults, ti.span,
                                  "associated type defaults are unstable");
            }
            _ => {}
        }
        visit::walk_trait_item(self, ti);
    }

    fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) {
        if ii.defaultness == ast::Defaultness::Default {
            gate_feature_post!(&self, specialization,
                              ii.span,
                              "specialization is unstable");
        }

        match ii.node {
            ast::ImplItemKind::Const(..) => {
                gate_feature_post!(&self, associated_consts,
                                  ii.span,
                                  "associated constants are experimental")
            }
            ast::ImplItemKind::Method(ref sig, _) => {
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ii.span, "const fn is unstable");
                }
            }
            _ => {}
        }
        visit::walk_impl_item(self, ii);
    }

    fn visit_generics(&mut self, g: &'a ast::Generics) {
        for t in &g.ty_params {
            if !t.attrs.is_empty() {
                gate_feature_post!(&self, generic_param_attrs, t.attrs[0].span,
                                   "attributes on type parameter bindings are experimental");
            }
        }
        visit::walk_generics(self, g)
    }

    fn visit_lifetime_def(&mut self, lifetime_def: &'a ast::LifetimeDef) {
        if !lifetime_def.attrs.is_empty() {
            gate_feature_post!(&self, generic_param_attrs, lifetime_def.attrs[0].span,
                               "attributes on lifetime bindings are experimental");
        }
        visit::walk_lifetime_def(self, lifetime_def)
    }
}

/// Parse every `#![feature(..)]` attribute on the crate into a `Features`
/// value, emitting E0555/E0556/E0557 for malformed or removed features.
pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute]) -> Features {
    let mut features = Features::new();

    let mut feature_checker = MutexFeatureChecker::default();

    for attr in krate_attrs {
        if !attr.check_name("feature") {
            continue
        }

        match attr.meta_item_list() {
            None => {
                span_err!(span_handler, attr.span, E0555,
                          "malformed feature attribute, expected #![feature(...)]");
            }
            Some(list) => {
                for mi in list {
                    let name = if let Some(word) = mi.word() {
                        word.name()
                    } else {
                        span_err!(span_handler, mi.span, E0556,
                                  "malformed feature, expected just one word");
                        continue
                    };

                    if let Some(&(_, _, _, setter)) = ACTIVE_FEATURES.iter()
                        .find(|& &(n, _, _, _)| name == n) {
                        // Active feature: flip its flag on and record the span
                        // for mutual-exclusion checking.
                        *(setter(&mut features)) = true;
                        feature_checker.collect(&features, mi.span);
                    }
                    else if let Some(&(_, _, _)) = REMOVED_FEATURES.iter()
                        .find(|& &(n, _, _)| name == n)
                        .or_else(|| STABLE_REMOVED_FEATURES.iter()
                            .find(|& &(n, _, _)| name == n)) {
                        span_err!(span_handler, mi.span, E0557, "feature has been removed");
                    }
                    else if let Some(&(_, _, _)) = ACCEPTED_FEATURES.iter()
                        .find(|& &(n, _, _)| name == n) {
features.declared_stable_lang_features.push((name, mi.span)); } else { features.declared_lib_features.push((name, mi.span)); } } } } } feature_checker.check(span_handler); features } // A collector for mutually-exclusive features and their flag spans #[derive(Default)] struct MutexFeatureChecker { proc_macro: Option<Span>, custom_attribute: Option<Span>, } impl MutexFeatureChecker { // If this method turns out to be a hotspot due to branching, // the branching can be eliminated by modifying `setter!()` to set these spans // only for the features that need to be checked for mutual exclusion. fn collect(&mut self, features: &Features, span: Span) { if features.proc_macro { // If self.proc_macro is None, set to Some(span) self.proc_macro = self.proc_macro.or(Some(span)); } if features.custom_attribute { self.custom_attribute = self.custom_attribute.or(Some(span)); } } fn check(self, handler: &Handler) { if let (Some(pm_span), Some(ca_span)) = (self.proc_macro, self.custom_attribute) { handler.struct_span_err(pm_span, "Cannot use `#![feature(proc_macro)]` and \ `#![feature(custom_attribute)] at the same time") .span_note(ca_span, "`#![feature(custom_attribute)]` declared here") .emit(); panic!(FatalError); } } } pub fn check_crate(krate: &ast::Crate, sess: &ParseSess, features: &Features, plugin_attributes: &[(String, AttributeType)], unstable: UnstableFeatures) { maybe_stage_features(&sess.span_diagnostic, krate, unstable); let ctx = Context { features: features, parse_sess: sess, plugin_attributes: plugin_attributes, }; visit::walk_crate(&mut PostExpansionVisitor { context: &ctx }, krate); } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum UnstableFeatures { /// Hard errors for unstable features are active, as on /// beta/stable channels. Disallow, /// Allow features to be activated, as on nightly. Allow, /// Errors are bypassed for bootstrapping. 
This is required any time /// during the build that feature-related lints are set to warn or above /// because the build turns on warnings-as-errors and uses lots of unstable /// features. As a result, this is always required for building Rust itself. Cheat } impl UnstableFeatures { pub fn from_environment() -> UnstableFeatures { // Whether this is a feature-staged build, i.e. on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); match (disable_unstable_features, bootstrap) { (_, true) => UnstableFeatures::Cheat, (true, _) => UnstableFeatures::Disallow, (false, _) => UnstableFeatures::Allow } } pub fn is_nightly_build(&self) -> bool { match *self { UnstableFeatures::Allow | UnstableFeatures::Cheat => true, _ => false, } } } fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate, unstable: UnstableFeatures) { let allow_features = match unstable { UnstableFeatures::Allow => true, UnstableFeatures::Disallow => false, UnstableFeatures::Cheat => true }; if !allow_features { for attr in &krate.attrs { if attr.check_name("feature") { let release_channel = option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)"); span_err!(span_handler, attr.span, E0554, "#[feature] may not be used on the {} release channel", release_channel); } } } }
42.398119
100
0.52
c1028c4ed128c5d52d9504b8dd6fccae6794e8c7
1,395
extern crate chrono; extern crate rustc_version; use std::env; use std::fs::File; use std::io::Write; use std::path::PathBuf; use chrono::NaiveDate; fn main() { let meta = rustc_version::version_meta().unwrap(); let commit_date = meta.commit_date.unwrap().parse::<NaiveDate>().unwrap(); if meta.channel == rustc_version::Channel::Dev || commit_date > NaiveDate::from_ymd(2017, 12, 26) { println!("cargo:rustc-cfg=has_termination_lang") } // newest nightlies don't need 'extern crate compiler_builtins' if commit_date < NaiveDate::from_ymd(2018, 04, 07) { println!("cargo:rustc-cfg=needs_cb") } let target = env::var("TARGET").unwrap(); has_fpu(&target); is_armv6m(&target); // Put the linker script somewhere the linker can find it let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap()); File::create(out.join("link.x")) .unwrap() .write_all(include_bytes!("link.x")) .unwrap(); println!("cargo:rustc-link-search={}", out.display()); println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=link.x"); } fn has_fpu(target: &str) { if target.ends_with("eabihf") { println!("cargo:rustc-cfg=has_fpu"); } } fn is_armv6m(target: &str) { if target.starts_with("thumbv6m-") { println!("cargo:rustc-cfg=armv6m"); } }
25.833333
78
0.630824
91ed1028af5749a9228c28b80ebd687c0f62f3ca
683
use std::path::{Path, PathBuf};

use walkdir::DirEntry;

use crate::handlers::utils::file_operation_context::FileOperationContext;
use crate::util::to_result;

/// Returns the path of `entry` relative to `root`, rendered as a UTF-8
/// string.
///
/// Fails when `entry` does not live under `root` or when the relative path
/// is not valid UTF-8.
fn get_relative_file_name(root: &Path, entry: &DirEntry) -> Result<String, String> {
    let relative = entry
        .path()
        .strip_prefix(root)
        .map_err(|_| "cannot strip prefix")?;
    to_result(relative.to_str(), "cannot get file name").map(String::from)
}

/// Maps a directory entry found under the context's source directory onto
/// the equivalent location under its target directory.
pub fn target_path(context: &FileOperationContext, entry: &DirEntry) -> Result<PathBuf, String> {
    let relative = get_relative_file_name(context.source_directory(), entry)?;
    Ok(Path::new(&context.target_directory()).join(relative))
}
40.176471
97
0.713031
117a32e7bc8cd6ef4095bb4b0802d57f29496641
9,504
//! Demangling support for various languages and compilers. //! //! Currently supported languages are: //! //! - C++ (GCC-style compilers and MSVC) //! - Rust (both `legacy` and `v0`) //! - Swift (up to Swift 5.2) //! - ObjC (only symbol detection) //! //! As the demangling schemes for the languages are different, the supported demangling features are //! inconsistent. For example, argument types were not encoded in legacy Rust mangling and thus not //! available in demangled names. //! //! This module is part of the `symbolic` crate and can be enabled via the `demangle` feature. //! //! # Examples //! //! ```rust //! use symbolic_common::{Language, Name}; //! use symbolic_demangle::{Demangle, DemangleOptions}; //! //! let name = Name::new("__ZN3std2io4Read11read_to_end17hb85a0f6802e14499E"); //! assert_eq!(name.detect_language(), Language::Rust); //! assert_eq!(name.try_demangle(DemangleOptions::default()), "std::io::Read::read_to_end"); //! ``` #![warn(missing_docs)] use std::borrow::Cow; use std::ffi::{CStr, CString}; use std::os::raw::{c_char, c_int}; use symbolic_common::{Language, Name}; use cpp_demangle::{DemangleOptions as CppOptions, Symbol as CppSymbol}; use msvc_demangler::DemangleFlags as MsvcFlags; extern "C" { fn symbolic_demangle_swift( sym: *const c_char, buf: *mut c_char, buf_len: usize, simplified: c_int, ) -> c_int; fn symbolic_demangle_is_swift_symbol(sym: *const c_char) -> c_int; } /// Defines the output format for demangling. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum DemangleFormat { /// Strips parameter names (in Swift) and sometimes packages or namespaces. Short, /// Outputs the full demangled name including arguments, return types, generics and packages. Full, } /// Options for [`Demangle::demangle`]. /// /// [`Demangle::demangle`]: trait.Demangle.html#tymethod.demangle #[derive(Clone, Copy, Debug)] pub struct DemangleOptions { /// General format to use for the output. /// /// Defaults to `DemangleFormat::Short`. 
pub format: DemangleFormat, /// Determines whether function arguments should be demangled. /// /// Defaults to `false`. pub with_arguments: bool, } impl Default for DemangleOptions { fn default() -> DemangleOptions { DemangleOptions { format: DemangleFormat::Short, with_arguments: false, } } } fn is_maybe_objc(ident: &str) -> bool { (ident.starts_with("-[") || ident.starts_with("+[")) && ident.ends_with(']') } fn is_maybe_cpp(ident: &str) -> bool { ident.starts_with("_Z") || ident.starts_with("__Z") || ident.starts_with("___Z") || ident.starts_with("____Z") } fn is_maybe_msvc(ident: &str) -> bool { ident.starts_with('?') || ident.starts_with("@?") } fn is_maybe_swift(ident: &str) -> bool { CString::new(ident) .map(|cstr| unsafe { symbolic_demangle_is_swift_symbol(cstr.as_ptr()) != 0 }) .unwrap_or(false) } fn try_demangle_msvc(ident: &str, opts: DemangleOptions) -> Option<String> { let flags = match opts.format { DemangleFormat::Full => MsvcFlags::COMPLETE, DemangleFormat::Short => { if opts.with_arguments { MsvcFlags::NO_FUNCTION_RETURNS } else { MsvcFlags::NAME_ONLY } } }; msvc_demangler::demangle(ident, flags).ok() } fn try_demangle_cpp(ident: &str, opts: DemangleOptions) -> Option<String> { if is_maybe_msvc(ident) { return try_demangle_msvc(ident, opts); } let symbol = match CppSymbol::new(ident) { Ok(symbol) => symbol, Err(_) => return None, }; let mut cpp_options = CppOptions::new(); if !opts.with_arguments { cpp_options = cpp_options.no_params().no_return_type(); } match symbol.demangle(&cpp_options) { Ok(demangled) => Some(demangled), Err(_) => None, } } fn try_demangle_rust(ident: &str, _opts: DemangleOptions) -> Option<String> { match rustc_demangle::try_demangle(ident) { Ok(demangled) => Some(format!("{:#}", demangled)), Err(_) => None, } } fn try_demangle_swift(ident: &str, opts: DemangleOptions) -> Option<String> { let mut buf = vec![0 as c_char; 4096]; let sym = match CString::new(ident) { Ok(sym) => sym, Err(_) => return None, }; let simplified = 
match opts.format { DemangleFormat::Short => { if opts.with_arguments { 1 } else { 2 } } DemangleFormat::Full => 0, }; unsafe { match symbolic_demangle_swift(sym.as_ptr(), buf.as_mut_ptr(), buf.len(), simplified) { 0 => None, _ => Some(CStr::from_ptr(buf.as_ptr()).to_string_lossy().to_string()), } } } fn try_demangle_objc(ident: &str, _opts: DemangleOptions) -> Option<String> { Some(ident.to_string()) } fn try_demangle_objcpp(ident: &str, opts: DemangleOptions) -> Option<String> { if is_maybe_objc(ident) { try_demangle_objc(ident, opts) } else if is_maybe_cpp(ident) { try_demangle_cpp(ident, opts) } else { None } } /// An extension trait on `Name` for demangling names. /// /// See the [module level documentation] for a list of supported languages. /// /// [module level documentation]: index.html pub trait Demangle { /// Infers the language of a mangled name. /// /// In case the symbol is not mangled or its language is unknown, the return value will be /// `Language::Unknown`. If the language of the symbol was specified explicitly, this is /// returned instead. For a list of supported languages, see the [module level documentation]. /// /// # Examples /// /// ``` /// use symbolic_common::{Name, Language}; /// use symbolic_demangle::{Demangle, DemangleOptions}; /// /// assert_eq!(Name::new("_ZN3foo3barEv").detect_language(), Language::Cpp); /// assert_eq!(Name::new("unknown").detect_language(), Language::Unknown); /// ``` /// /// [module level documentation]: index.html fn detect_language(&self) -> Language; /// Demangles the name with the given options. /// /// Returns `None` in one of the following cases: /// 1. The language cannot be detected. /// 2. The language is not supported. /// 3. Demangling of the name failed. 
/// /// # Examples /// /// ``` /// use symbolic_common::Name; /// use symbolic_demangle::{Demangle, DemangleOptions}; /// /// assert_eq!(Name::new("_ZN3foo3barEv").demangle(DemangleOptions::default()), Some("foo::bar".to_string())); /// assert_eq!(Name::new("unknown").demangle(DemangleOptions::default()), None); /// ``` fn demangle(&self, opts: DemangleOptions) -> Option<String>; /// Tries to demangle the name and falls back to the original name. /// /// Similar to [`demangle`], except that it returns a borrowed instance of the original name if /// the name cannot be demangled. /// /// # Examples /// /// ``` /// use symbolic_common::Name; /// use symbolic_demangle::{Demangle, DemangleOptions}; /// /// assert_eq!(Name::new("_ZN3foo3barEv").try_demangle(DemangleOptions::default()), "foo::bar"); /// assert_eq!(Name::new("unknown").try_demangle(DemangleOptions::default()), "unknown"); /// ``` /// /// [`demangle`]: trait.Demangle.html#tymethod.demangle fn try_demangle(&self, opts: DemangleOptions) -> Cow<'_, str>; } impl<'a> Demangle for Name<'a> { fn detect_language(&self) -> Language { if self.language() != Language::Unknown { return self.language(); } if is_maybe_objc(self.as_str()) { return Language::ObjC; } if rustc_demangle::try_demangle(self.as_str()).is_ok() { return Language::Rust; } if is_maybe_cpp(self.as_str()) || is_maybe_msvc(self.as_str()) { return Language::Cpp; } if is_maybe_swift(self.as_str()) { return Language::Swift; } Language::Unknown } fn demangle(&self, opts: DemangleOptions) -> Option<String> { match self.detect_language() { Language::ObjC => try_demangle_objc(self.as_str(), opts), Language::ObjCpp => try_demangle_objcpp(self.as_str(), opts), Language::Rust => try_demangle_rust(self.as_str(), opts), Language::Cpp => try_demangle_cpp(self.as_str(), opts), Language::Swift => try_demangle_swift(self.as_str(), opts), _ => None, } } fn try_demangle(&self, opts: DemangleOptions) -> Cow<'_, str> { match self.demangle(opts) { Some(demangled) => 
Cow::Owned(demangled), None => Cow::Borrowed(self.as_str()), } } } /// Demangles an identifier and falls back to the original symbol. /// /// This is a shortcut for [`Demangle::try_demangle`] with default options. /// /// # Examples /// /// ``` /// assert_eq!(symbolic_demangle::demangle("_ZN3foo3barEv"), "foo::bar"); /// ``` /// /// [`Demangle::try_demangle`]: trait.Demangle.html#tymethod.try_demangle pub fn demangle(ident: &str) -> Cow<'_, str> { match Name::new(ident).demangle(Default::default()) { Some(demangled) => Cow::Owned(demangled), None => Cow::Borrowed(ident), } }
30.559486
114
0.615109
1ce5332f781e8a9424037dd4a41aaed96ded626e
3,405
//! Generic array are commonly used as a return value for hash digests, so //! it's a good idea to allow to hexlify them easily. This module implements //! `std::fmt::LowerHex` and `std::fmt::UpperHex` traits. //! //! Example: //! //! ```rust //! # #[macro_use] //! # extern crate generic_array; //! # extern crate typenum; //! # fn main() { //! let array = arr![u8; 10, 20, 30]; //! assert_eq!(format!("{:x}", array), "0a141e"); //! # } //! ``` //! use {ArrayLength, GenericArray}; use core::fmt; use core::ops::Add; use core::str; use typenum::*; static LOWER_CHARS: &'static [u8] = b"0123456789abcdef"; static UPPER_CHARS: &'static [u8] = b"0123456789ABCDEF"; impl<T: ArrayLength<u8>> fmt::LowerHex for GenericArray<u8, T> where T: Add<T>, <T as Add<T>>::Output: ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let max_digits = f.precision().unwrap_or_else(|| self.len()); if T::to_usize() < 1024 { // For small arrays use a stack allocated // buffer of 2x number of bytes let mut res = GenericArray::<u8, Sum<T, T>>::default(); for (i, c) in self.iter().take(max_digits).enumerate() { res[i * 2] = LOWER_CHARS[(c >> 4) as usize]; res[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize]; } f.write_str( unsafe { str::from_utf8_unchecked(&res[..max_digits * 2]) }, )?; } else { // For large array use chunks of up to 1024 bytes (2048 hex chars) let mut buf = [0u8; 2048]; for chunk in self[..max_digits].chunks(1024) { for (i, c) in chunk.iter().enumerate() { buf[i * 2] = LOWER_CHARS[(c >> 4) as usize]; buf[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize]; } f.write_str(unsafe { str::from_utf8_unchecked(&buf[..chunk.len() * 2]) })?; } } Ok(()) } } impl<T: ArrayLength<u8>> fmt::UpperHex for GenericArray<u8, T> where T: Add<T>, <T as Add<T>>::Output: ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let max_digits = f.precision().unwrap_or_else(|| self.len()); if T::to_usize() < 1024 { // For small arrays use a stack allocated // buffer of 2x number 
of bytes let mut res = GenericArray::<u8, Sum<T, T>>::default(); for (i, c) in self.iter().take(max_digits).enumerate() { res[i * 2] = UPPER_CHARS[(c >> 4) as usize]; res[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize]; } f.write_str( unsafe { str::from_utf8_unchecked(&res[..max_digits * 2]) }, )?; } else { // For large array use chunks of up to 1024 bytes (2048 hex chars) let mut buf = [0u8; 2048]; for chunk in self[..max_digits].chunks(1024) { for (i, c) in chunk.iter().enumerate() { buf[i * 2] = UPPER_CHARS[(c >> 4) as usize]; buf[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize]; } f.write_str(unsafe { str::from_utf8_unchecked(&buf[..chunk.len() * 2]) })?; } } Ok(()) } }
33.382353
78
0.500147
330fb6f8d6700a2e2c61955f32bb04d94815a3dc
1,206
use std::thread;
use std::time;
use discord_rpc_client::Client;
use sysinfo::{NetworkExt, NetworksExt, ProcessExt, System, SystemExt, Process};
use std::ptr;
use winapi::um::wincon::GetConsoleWindow;
use winapi::um::winuser::{ShowWindow, SW_HIDE};

/// Hides this process's console window, then updates Discord Rich Presence
/// every 30 seconds according to whether a Zoom client process is running.
fn main() {
    // Hide our own console window, if we were launched with one attached.
    let window = unsafe { GetConsoleWindow() };
    if !window.is_null() {
        unsafe {
            ShowWindow(window, SW_HIDE);
        }
    }

    let mut client = Client::new(850515479725998111).unwrap();
    client.start();

    loop {
        if is_zoom_open() {
            client
                .set_activity(|act| {
                    act.state("In a Meeting")
                        .assets(|ass| ass.large_image("large").large_text("Zoom"))
                })
                .expect("Failed to set activity for zoom");
        } else {
            // Best effort: failing to clear the presence is not fatal, so the
            // result is explicitly discarded instead of silently ignored.
            drop(client.clear_activity());
        }
        thread::sleep(time::Duration::from_secs(30));
    }
}

/// Returns `true` when any running process looks like the Zoom client
/// (name starts with "Zoom"), excluding this presence helper itself.
fn is_zoom_open() -> bool {
    let sys = System::new_all();
    sys.get_processes().values().any(|process| {
        process.name().starts_with("Zoom") && process.name() != "ZoomRichPresence.exe"
    })
}
24.12
92
0.546434
9095924eb57b2d464cd9a94a6a804753844ce4f9
1,173
extern crate base64;
extern crate clap;
extern crate execute;
extern crate rand;

mod codesign;
mod options;
mod security;

use std::fs;
use crate::options::SignOptions;
use crate::security::Security;
use clap::Clap;
use std::path::Path;
use crate::codesign::Codesign;

/// Entry point: decodes the base64-encoded signing certificate to
/// `decoded.p12`, builds a temporary keychain around it, signs the artefact,
/// and tears the keychain down again.
fn main() {
    let options: SignOptions = SignOptions::parse();

    // Materialise the certificate on disk for the `security` tooling.
    let cert_bytes = base64::decode(&options.certificate).expect("Could not decode certificate");
    let cert_path = Path::new("decoded.p12").to_path_buf();
    fs::write(&cert_path, cert_bytes).expect("Could not export decoded certificate");

    // Build a throwaway keychain holding the signing identity. Deleting
    // first clears any leftover keychain from a previous run.
    let mut security = Security::new(&cert_path, options.password.clone());
    security.delete_keychain();
    security.create_keychain();
    security.add_keychain_to_user_domain();
    security.set_keychain_settings();
    security.unlock_keychain();
    security.import_keychain();
    security.set_key_partition_list();

    // Sign the artefact, then clean up the temporary keychain.
    options.with_signing_identity_and_entitlements(|signing_identity, entitlements| {
        Codesign::new(signing_identity, entitlements).sign(options.artefact.as_path());
    });
    security.delete_keychain();
}
25.5
99
0.7289
e5eea5c5356d2cf5f8479be25679525781c13c5d
583
use kepler::{app, config, tracing_try_init};
use rocket::figment::providers::{Env, Format, Serialized, Toml};

/// Binary entry point: assembles layered configuration and launches the
/// Rocket application.
#[rocket::main]
async fn main() {
    tracing_try_init();
    // Configuration precedence (later merges win): Rocket defaults, then app
    // defaults, then `kepler.toml`, then KEPLER_*/ROCKET_* environment
    // variables.
    let config = rocket::figment::Figment::from(rocket::Config::default())
        .merge(Serialized::defaults(config::Config::default()))
        .merge(Toml::file("kepler.toml").nested())
        .merge(Env::prefixed("KEPLER_").split("_").global())
        .merge(Env::prefixed("ROCKET_").global()); // That's just for easy access to ROCKET_LOG_LEVEL
    app(&config).await.unwrap().launch().await.unwrap();
}
36.4375
101
0.656947
bff305fadb2c5f6574be8f050487704e3614dbed
16,995
/*! # juniper_iron This repository contains the [Iron][Iron] web framework integration for [Juniper][Juniper], a [GraphQL][GraphQL] implementation for Rust. For documentation, including guides and examples, check out [Juniper][Juniper]. A basic usage example can also be found in the [Api documentation][documentation]. ## Links * [Juniper][Juniper] * [Api Reference][documentation] * [Iron framework][Iron] ## Integrating with Iron For example, continuing from the schema created above and using Iron to expose the schema on an HTTP endpoint supporting both GET and POST requests: ```rust,no_run extern crate iron; # #[macro_use] extern crate juniper; # extern crate juniper_iron; # use std::collections::HashMap; use iron::prelude::*; use juniper_iron::GraphQLHandler; use juniper::{Context, EmptyMutation}; # use juniper::FieldResult; # # struct User { id: String, name: String, friend_ids: Vec<String> } # struct QueryRoot; # struct Database { users: HashMap<String, User> } # # graphql_object!(User: Database |&self| { # field id() -> FieldResult<&String> { # Ok(&self.id) # } # # field name() -> FieldResult<&String> { # Ok(&self.name) # } # # field friends(&executor) -> FieldResult<Vec<&User>> { # Ok(self.friend_ids.iter() # .filter_map(|id| executor.context().users.get(id)) # .collect()) # } # }); # # graphql_object!(QueryRoot: Database |&self| { # field user(&executor, id: String) -> FieldResult<Option<&User>> { # Ok(executor.context().users.get(&id)) # } # }); // This function is executed for every request. Here, we would realistically // provide a database connection or similar. For this example, we'll be // creating the database from scratch. 
fn context_factory(_: &mut Request) -> IronResult<Database> { Ok(Database { users: vec![ ( "1000".to_owned(), User { id: "1000".to_owned(), name: "Robin".to_owned(), friend_ids: vec!["1001".to_owned()] } ), ( "1001".to_owned(), User { id: "1001".to_owned(), name: "Max".to_owned(), friend_ids: vec!["1000".to_owned()] } ), ].into_iter().collect() }) } impl Context for Database {} fn main() { // GraphQLHandler takes a context factory function, the root object, // and the mutation object. If we don't have any mutations to expose, we // can use the empty tuple () to indicate absence. let graphql_endpoint = GraphQLHandler::new( context_factory, QueryRoot, EmptyMutation::<Database>::new()); // Start serving the schema at the root on port 8080. Iron::new(graphql_endpoint).http("localhost:8080").unwrap(); } ``` See the the [`GraphQLHandler`][3] documentation for more information on what request methods are supported. [3]: ./struct.GraphQLHandler.html [Iron]: https://github.com/iron/iron [Juniper]: https://github.com/graphql-rust/juniper [GraphQL]: http://graphql.org [documentation]: https://docs.rs/juniper_iron */ #[macro_use] extern crate iron; #[cfg(test)] extern crate iron_test; extern crate juniper; extern crate serde_json; #[macro_use] extern crate serde_derive; #[cfg(test)] extern crate url; extern crate urlencoded; use iron::method; use iron::middleware::Handler; use iron::mime::Mime; use iron::prelude::*; use iron::status; use urlencoded::{UrlDecodingError, UrlEncodedQuery}; use std::error::Error; use std::fmt; use std::io::Read; use serde_json::error::Error as SerdeError; use juniper::http; use juniper::serde::Deserialize; use juniper::{DefaultScalarValue, GraphQLType, InputValue, RootNode, ScalarRefValue, ScalarValue}; #[derive(Deserialize)] #[serde(untagged)] #[serde(bound = "InputValue<S>: Deserialize<'de>")] enum GraphQLBatchRequest<S = DefaultScalarValue> where S: ScalarValue, { Single(http::GraphQLRequest<S>), Batch(Vec<http::GraphQLRequest<S>>), } 
#[derive(Serialize)] #[serde(untagged)] enum GraphQLBatchResponse<'a, S = DefaultScalarValue> where S: ScalarValue, { Single(http::GraphQLResponse<'a, S>), Batch(Vec<http::GraphQLResponse<'a, S>>), } impl<S> GraphQLBatchRequest<S> where S: ScalarValue, for<'b> &'b S: ScalarRefValue<'b>, { pub fn execute<'a, CtxT, QueryT, MutationT>( &'a self, root_node: &'a RootNode<QueryT, MutationT, S>, context: &CtxT, ) -> GraphQLBatchResponse<'a, S> where QueryT: GraphQLType<S, Context = CtxT>, MutationT: GraphQLType<S, Context = CtxT>, { match self { &GraphQLBatchRequest::Single(ref request) => { GraphQLBatchResponse::Single(request.execute(root_node, context)) } &GraphQLBatchRequest::Batch(ref requests) => GraphQLBatchResponse::Batch( requests .iter() .map(|request| request.execute(root_node, context)) .collect(), ), } } } impl<'a, S> GraphQLBatchResponse<'a, S> where S: ScalarValue, { fn is_ok(&self) -> bool { match self { &GraphQLBatchResponse::Single(ref response) => response.is_ok(), &GraphQLBatchResponse::Batch(ref responses) => responses .iter() .fold(true, |ok, response| ok && response.is_ok()), } } } /// Handler that executes `GraphQL` queries in the given schema /// /// The handler responds to GET requests and POST requests only. In GET /// requests, the query should be supplied in the `query` URL parameter, e.g. /// `http://localhost:3000/graphql?query={hero{name}}`. /// /// POST requests support both queries and variables. POST a JSON document to /// this endpoint containing the field `"query"` and optionally `"variables"`. /// The variables should be a JSON object containing the variable to value /// mapping. 
pub struct GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S = DefaultScalarValue> where S: ScalarValue, for<'b> &'b S: ScalarRefValue<'b>, CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static, CtxT: 'static, Query: GraphQLType<S, Context = CtxT> + Send + Sync + 'static, Mutation: GraphQLType<S, Context = CtxT> + Send + Sync + 'static, { context_factory: CtxFactory, root_node: RootNode<'a, Query, Mutation, S>, } /// Handler that renders `GraphiQL` - a graphical query editor interface pub struct GraphiQLHandler { graphql_url: String, } /// Handler that renders `GraphQL Playground` - a graphical query editor interface pub struct PlaygroundHandler { graphql_url: String, } fn get_single_value<T>(mut values: Vec<T>) -> IronResult<T> { if values.len() == 1 { Ok(values.remove(0)) } else { Err(GraphQLIronError::InvalidData("Duplicate URL query parameter").into()) } } fn parse_url_param(params: Option<Vec<String>>) -> IronResult<Option<String>> { if let Some(values) = params { get_single_value(values).map(Some) } else { Ok(None) } } fn parse_variable_param<S>(params: Option<Vec<String>>) -> IronResult<Option<InputValue<S>>> where S: ScalarValue, { if let Some(values) = params { Ok( serde_json::from_str::<InputValue<S>>(get_single_value(values)?.as_ref()) .map(Some) .map_err(GraphQLIronError::Serde)?, ) } else { Ok(None) } } impl<'a, CtxFactory, Query, Mutation, CtxT, S> GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S> where S: ScalarValue + 'a, for<'b> &'b S: ScalarRefValue<'b>, CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static, CtxT: 'static, Query: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static, Mutation: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static, { /// Build a new GraphQL handler /// /// The context factory will receive the Iron request object and is /// expected to construct a context object for the given schema. This can /// be used to construct e.g. 
database connections or similar data that /// the schema needs to execute the query. pub fn new(context_factory: CtxFactory, query: Query, mutation: Mutation) -> Self { GraphQLHandler { context_factory: context_factory, root_node: RootNode::new(query, mutation), } } fn handle_get(&self, req: &mut Request) -> IronResult<GraphQLBatchRequest<S>> { let url_query_string = req .get_mut::<UrlEncodedQuery>() .map_err(GraphQLIronError::Url)?; let input_query = parse_url_param(url_query_string.remove("query"))? .ok_or_else(|| GraphQLIronError::InvalidData("No query provided"))?; let operation_name = parse_url_param(url_query_string.remove("operationName"))?; let variables = parse_variable_param(url_query_string.remove("variables"))?; Ok(GraphQLBatchRequest::Single(http::GraphQLRequest::new( input_query, operation_name, variables, ))) } fn handle_post(&self, req: &mut Request) -> IronResult<GraphQLBatchRequest<S>> { let mut request_payload = String::new(); itry!(req.body.read_to_string(&mut request_payload)); Ok( serde_json::from_str::<GraphQLBatchRequest<S>>(request_payload.as_str()) .map_err(GraphQLIronError::Serde)?, ) } fn execute(&self, context: &CtxT, request: GraphQLBatchRequest<S>) -> IronResult<Response> { let response = request.execute(&self.root_node, context); let content_type = "application/json".parse::<Mime>().unwrap(); let json = serde_json::to_string_pretty(&response).unwrap(); let status = if response.is_ok() { status::Ok } else { status::BadRequest }; Ok(Response::with((content_type, status, json))) } } impl GraphiQLHandler { /// Build a new GraphiQL handler targeting the specified URL. /// /// The provided URL should point to the URL of the attached `GraphQLHandler`. It can be /// relative, so a common value could be `"/graphql"`. pub fn new(graphql_url: &str) -> GraphiQLHandler { GraphiQLHandler { graphql_url: graphql_url.to_owned(), } } } impl PlaygroundHandler { /// Build a new GraphQL Playground handler targeting the specified URL. 
/// /// The provided URL should point to the URL of the attached `GraphQLHandler`. It can be /// relative, so a common value could be `"/graphql"`. pub fn new(graphql_url: &str) -> PlaygroundHandler { PlaygroundHandler { graphql_url: graphql_url.to_owned(), } } } impl<'a, CtxFactory, Query, Mutation, CtxT, S> Handler for GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S> where S: ScalarValue + Sync + Send + 'static, for<'b> &'b S: ScalarRefValue<'b>, CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static, CtxT: 'static, Query: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static, Mutation: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static, 'a: 'static, { fn handle(&self, mut req: &mut Request) -> IronResult<Response> { let context = (self.context_factory)(req)?; let graphql_request = match req.method { method::Get => self.handle_get(&mut req)?, method::Post => self.handle_post(&mut req)?, _ => return Ok(Response::with(status::MethodNotAllowed)), }; self.execute(&context, graphql_request) } } impl Handler for GraphiQLHandler { fn handle(&self, _: &mut Request) -> IronResult<Response> { let content_type = "text/html; charset=utf-8".parse::<Mime>().unwrap(); Ok(Response::with(( content_type, status::Ok, juniper::graphiql::graphiql_source(&self.graphql_url), ))) } } impl Handler for PlaygroundHandler { fn handle(&self, _: &mut Request) -> IronResult<Response> { let content_type = "text/html; charset=utf-8".parse::<Mime>().unwrap(); Ok(Response::with(( content_type, status::Ok, juniper::http::playground::playground_source(&self.graphql_url), ))) } } #[derive(Debug)] enum GraphQLIronError { Serde(SerdeError), Url(UrlDecodingError), InvalidData(&'static str), } impl fmt::Display for GraphQLIronError { fn fmt(&self, mut f: &mut fmt::Formatter) -> fmt::Result { match *self { GraphQLIronError::Serde(ref err) => fmt::Display::fmt(err, &mut f), GraphQLIronError::Url(ref err) => fmt::Display::fmt(err, &mut f), 
GraphQLIronError::InvalidData(err) => fmt::Display::fmt(err, &mut f), } } } impl Error for GraphQLIronError { fn description(&self) -> &str { match *self { GraphQLIronError::Serde(ref err) => err.description(), GraphQLIronError::Url(ref err) => err.description(), GraphQLIronError::InvalidData(err) => err, } } fn cause(&self) -> Option<&Error> { match *self { GraphQLIronError::Serde(ref err) => Some(err), GraphQLIronError::Url(ref err) => Some(err), GraphQLIronError::InvalidData(_) => None, } } } impl From<GraphQLIronError> for IronError { fn from(err: GraphQLIronError) -> IronError { let message = format!("{}", err); IronError::new(err, (status::BadRequest, message)) } } #[cfg(test)] mod tests { use super::*; use iron::Url; use iron::{Handler, Headers}; use iron_test::{request, response}; use url::percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET}; use juniper::http::tests as http_tests; use juniper::tests::model::Database; use juniper::EmptyMutation; use super::GraphQLHandler; // This is ugly but it works. `iron_test` just dumps the path/url in headers // and newer `hyper` doesn't allow unescaped "{" or "}". 
fn fixup_url(url: &str) -> String { let url = Url::parse(&format!("http://localhost:3000{}", url)).expect("url to parse"); let path: String = url .path() .iter() .map(|x| x.to_string()) .collect::<Vec<String>>() .join("/"); format!( "http://localhost:3000{}?{}", path, utf8_percent_encode(url.query().unwrap_or(""), DEFAULT_ENCODE_SET) ) } struct TestIronIntegration; impl http_tests::HTTPIntegration for TestIronIntegration { fn get(&self, url: &str) -> http_tests::TestResponse { let result = request::get(&fixup_url(url), Headers::new(), &make_handler()); match result { Ok(response) => make_test_response(response), Err(e) => make_test_error_response(e), } } fn post(&self, url: &str, body: &str) -> http_tests::TestResponse { let result = request::post(&fixup_url(url), Headers::new(), body, &make_handler()); match result { Ok(response) => make_test_response(response), Err(e) => make_test_error_response(e), } } } #[test] fn test_iron_integration() { let integration = TestIronIntegration; http_tests::run_http_test_suite(&integration); } fn context_factory(_: &mut Request) -> IronResult<Database> { Ok(Database::new()) } fn make_test_error_response(_: IronError) -> http_tests::TestResponse { // For now all errors return the same status code. // `juniper_iron` users can choose to do something different if desired. 
http_tests::TestResponse { status_code: 400, body: None, content_type: "application/json".to_string(), } } fn make_test_response(response: Response) -> http_tests::TestResponse { let status_code = response .status .expect("No status code returned from handler") .to_u16() as i32; let content_type = String::from_utf8( response .headers .get_raw("content-type") .expect("No content type header from handler")[0] .clone(), ) .expect("Content-type header invalid UTF-8"); let body = response::extract_body_to_string(response); http_tests::TestResponse { status_code: status_code, body: Some(body), content_type: content_type, } } fn make_handler() -> Box<Handler> { Box::new(GraphQLHandler::new( context_factory, Database::new(), EmptyMutation::<Database>::new(), )) } }
31.530612
98
0.617358
6964b8292d14b203d4e427916b593c0597f5402f
1,425
//! Error module #![cfg(feature = "alloc")] use alloc::{format, string::String, string::ToString}; /// Deku errors #[derive(Debug, PartialEq)] pub enum DekuError { /// Parsing error when reading Parse(String), /// Invalid parameter InvalidParam(String), /// Unexpected error Unexpected(String), } impl From<core::num::TryFromIntError> for DekuError { fn from(e: core::num::TryFromIntError) -> DekuError { DekuError::Parse(format!("error parsing int: {}", e.to_string())) } } impl From<core::array::TryFromSliceError> for DekuError { fn from(e: core::array::TryFromSliceError) -> DekuError { DekuError::Parse(format!("error parsing from slice: {}", e.to_string())) } } impl From<core::convert::Infallible> for DekuError { fn from(_e: core::convert::Infallible) -> DekuError { unreachable!(); } } impl core::fmt::Display for DekuError { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match *self { DekuError::Parse(ref err) => write!(f, "Parse error: {}", err), DekuError::InvalidParam(ref err) => write!(f, "Invalid param error: {}", err), DekuError::Unexpected(ref err) => write!(f, "Unexpected error: {}", err), } } } #[cfg(feature = "std")] impl std::error::Error for DekuError { fn cause(&self) -> Option<&dyn std::error::Error> { Some(self) } }
27.403846
90
0.610526
148eaeae85c8280b133a23a279653cbba33e34bf
954
use crate::encode::{Encode, IsNull}; use crate::mssql::protocol::type_info::{DataType, TypeInfo}; use crate::mssql::{Mssql, MssqlTypeInfo}; mod bool; mod float; mod int; mod str; impl<'q, T: 'q + Encode<'q, Mssql>> Encode<'q, Mssql> for Option<T> { fn encode(self, buf: &mut Vec<u8>) -> IsNull { if let Some(v) = self { v.encode(buf) } else { IsNull::Yes } } fn encode_by_ref(&self, buf: &mut Vec<u8>) -> IsNull { if let Some(v) = self { v.encode_by_ref(buf) } else { IsNull::Yes } } fn produces(&self) -> Option<MssqlTypeInfo> { if let Some(v) = self { v.produces() } else { // MSSQL requires a special NULL type ID Some(MssqlTypeInfo(TypeInfo::new(DataType::Null, 0))) } } fn size_hint(&self) -> usize { self.as_ref().map_or(0, Encode::size_hint) } }
23.85
69
0.525157
d611e956ecba729cc71f33dae7cc7192687012fe
223
pub trait UnwrapOrUnknownExt { fn unwrap_or_unknown(self) -> String; } impl UnwrapOrUnknownExt for Option<String> { fn unwrap_or_unknown(self) -> String { self.unwrap_or_else(|| "<unknown>".into()) } }
22.3
50
0.672646
ac2429b16efcc06df71286656c3cbb6844f31df7
2,417
#![feature(try_from)] #[macro_use] extern crate serde_derive; mod local; mod map; mod records; pub use self::local::*; pub use self::map::*; pub use self::records::*; pub use noria::DataType; pub trait SizeOf { fn deep_size_of(&self) -> u64; fn size_of(&self) -> u64; } impl SizeOf for DataType { fn deep_size_of(&self) -> u64 { use std::mem::size_of_val; let inner = match *self { DataType::Text(ref t) => size_of_val(t) as u64 + t.to_bytes().len() as u64, _ => 0u64, }; self.size_of() + inner } fn size_of(&self) -> u64 { use std::mem::size_of; // doesn't include data if stored externally size_of::<DataType>() as u64 } } impl SizeOf for Vec<DataType> { fn deep_size_of(&self) -> u64 { use std::mem::size_of_val; size_of_val(self) as u64 + self.iter().fold(0u64, |acc, d| acc + d.deep_size_of()) } fn size_of(&self) -> u64 { use std::mem::{size_of, size_of_val}; size_of_val(self) as u64 + size_of::<DataType>() as u64 * self.len() as u64 } } #[cfg(test)] mod tests { use super::*; #[test] fn data_type_mem_size() { use arccstr::ArcCStr; use chrono::NaiveDateTime; use std::convert::TryFrom; use std::mem::{size_of, size_of_val}; let txt: DataType = DataType::Text(ArcCStr::try_from("hi").unwrap()); let shrt = DataType::Int(5); let long = DataType::BigInt(5); let time = DataType::Timestamp(NaiveDateTime::from_timestamp(0, 42_000_000)); let rec = vec![DataType::Int(5), "asdfasdfasdfasdf".into(), "asdf".into()]; // DataType should always use 16 bytes itself assert_eq!(size_of::<DataType>(), 16); assert_eq!(size_of_val(&txt), 16); assert_eq!(size_of_val(&txt) as u64, txt.size_of()); assert_eq!(txt.deep_size_of(), txt.size_of() + 8 + 2); // DataType + ArcCStr's ptr + 2 chars assert_eq!(size_of_val(&shrt), 16); assert_eq!(size_of_val(&long), 16); assert_eq!(size_of_val(&time), 16); assert_eq!(size_of_val(&time) as u64, time.size_of()); assert_eq!(time.deep_size_of(), 16); // DataType + inline NaiveDateTime assert_eq!(size_of_val(&rec), 24); assert_eq!(rec.size_of(), 24 + 3 * 16); 
assert_eq!(rec.deep_size_of(), 24 + 3 * 16 + (8 + 16)); } }
27.465909
100
0.584195
9bda4c8345d6a0d6d4472796ee6d39f560512583
224
pub mod cli; pub mod config; pub mod config_builder; pub mod config_consts; pub mod domain_updater; pub mod humantime_wrapper_serde; pub mod ip_fetcher; pub mod logger; pub mod signal_handlers; pub mod token; pub mod types;
18.666667
32
0.803571
1e2856c49db92c2d730721a00d4c214b3f373f63
15,972
use super::SuiteResult; use serde::{Deserialize, Serialize}; use std::{ env, fs, io::{self, BufReader, BufWriter}, path::Path, }; /// Structure to store full result information. #[derive(Debug, Clone, Deserialize, Serialize)] struct ResultInfo { #[serde(rename = "c")] commit: Box<str>, #[serde(rename = "u")] test262_commit: Box<str>, #[serde(rename = "r")] results: SuiteResult, } /// Structure to store full result information. #[derive(Debug, Clone, Deserialize, Serialize)] struct ReducedResultInfo { #[serde(rename = "c")] commit: Box<str>, #[serde(rename = "u")] test262_commit: Box<str>, #[serde(rename = "t")] total: usize, #[serde(rename = "o")] passed: usize, #[serde(rename = "i")] ignored: usize, #[serde(rename = "p")] panic: usize, } impl From<ResultInfo> for ReducedResultInfo { /// Creates a new reduced suite result from a full suite result. fn from(info: ResultInfo) -> Self { Self { commit: info.commit, test262_commit: info.test262_commit, total: info.results.total, passed: info.results.passed, ignored: info.results.ignored, panic: info.results.panic, } } } #[derive(Debug, Clone, Deserialize, Serialize)] struct FeaturesInfo { #[serde(rename = "c")] commit: Box<str>, #[serde(rename = "u")] test262_commit: Box<str>, #[serde(rename = "n")] suite_name: Box<str>, #[serde(rename = "f")] features: Vec<String>, } fn remove_duplicates(features_vec: &[String]) -> Vec<String> { let mut result = features_vec.to_vec(); result.sort(); result.dedup(); result } impl From<ResultInfo> for FeaturesInfo { fn from(info: ResultInfo) -> Self { Self { commit: info.commit, test262_commit: info.test262_commit, suite_name: info.results.name, features: remove_duplicates(&info.results.features), } } } /// File name of the "latest results" JSON file. const LATEST_FILE_NAME: &str = "latest.json"; /// File name of the "all results" JSON file. const RESULTS_FILE_NAME: &str = "results.json"; /// File name of the "features" JSON file. 
const FEATURES_FILE_NAME: &str = "features.json"; /// Writes the results of running the test suite to the given JSON output file. /// /// It will append the results to the ones already present, in an array. pub(crate) fn write_json( results: SuiteResult, output: Option<&Path>, verbose: u8, ) -> io::Result<()> { if let Some(path) = output { let mut branch = env::var("GITHUB_REF").unwrap_or_default(); if branch.starts_with("refs/pull") { branch = "pull".to_owned(); } let path = if branch.is_empty() { path.to_path_buf() } else { let folder = path.join(branch); fs::create_dir_all(&folder)?; folder }; // We make sure we are using the latest commit information in GitHub pages: update_gh_pages_repo(path.as_path(), verbose); if verbose != 0 { println!("Writing the results to {}...", path.display()); } // Write the latest results. let latest_path = path.join(LATEST_FILE_NAME); let new_results = ResultInfo { commit: env::var("GITHUB_SHA").unwrap_or_default().into_boxed_str(), test262_commit: get_test262_commit(), results, }; let latest_output = BufWriter::new(fs::File::create(latest_path)?); serde_json::to_writer(latest_output, &new_results)?; // Write the full list of results, retrieving the existing ones first. let all_path = path.join(RESULTS_FILE_NAME); let mut all_results: Vec<ReducedResultInfo> = if all_path.exists() { serde_json::from_reader(BufReader::new(fs::File::open(&all_path)?))? } else { Vec::new() }; all_results.push(new_results.clone().into()); let output = BufWriter::new(fs::File::create(&all_path)?); serde_json::to_writer(output, &all_results)?; if verbose != 0 { println!("Results written correctly"); } // Write the full list of features, existing features go first. let features_path = path.join(FEATURES_FILE_NAME); let mut all_features: Vec<FeaturesInfo> = if features_path.exists() { serde_json::from_reader(BufReader::new(fs::File::open(&features_path)?))? 
} else { Vec::new() }; all_features.push(new_results.into()); let features_output = BufWriter::new(fs::File::create(&features_path)?); serde_json::to_writer(features_output, &all_features)?; if verbose != 0 { println!("Features written correctly"); } } Ok(()) } /// Gets the commit OID of the test262 submodule. fn get_test262_commit() -> Box<str> { let mut commit_id = fs::read_to_string(".git/modules/test262/HEAD") .expect("did not find git submodule ref at '.git/modules/test262/HEAD'"); // Remove newline. commit_id.pop(); commit_id.into_boxed_str() } /// Updates the GitHub pages repository by pulling latest changes before writing the new things. fn update_gh_pages_repo(path: &Path, verbose: u8) { if env::var("GITHUB_REF").is_ok() { use std::process::Command; // We run the command to pull the gh-pages branch: git -C ../gh-pages/ pull origin Command::new("git") .args(&["-C", "../gh-pages", "pull", "--ff-only"]) .output() .expect("could not update GitHub Pages"); // Copy the full results file let from = Path::new("../gh-pages/test262/refs/heads/main/").join(RESULTS_FILE_NAME); let to = path.join(RESULTS_FILE_NAME); if verbose != 0 { println!( "Copying the {} file to {} in order to add the results", from.display(), to.display() ); } fs::copy(from, to).expect("could not copy the main results file"); } } /// Compares the results of two test suite runs. 
pub(crate) fn compare_results(base: &Path, new: &Path, markdown: bool) { let base_results: ResultInfo = serde_json::from_reader(BufReader::new( fs::File::open(base).expect("could not open the base results file"), )) .expect("could not read the base results"); let new_results: ResultInfo = serde_json::from_reader(BufReader::new( fs::File::open(new).expect("could not open the new results file"), )) .expect("could not read the new results"); let base_total = base_results.results.total as isize; let new_total = new_results.results.total as isize; let total_diff = new_total - base_total; let base_passed = base_results.results.passed as isize; let new_passed = new_results.results.passed as isize; let passed_diff = new_passed - base_passed; let base_ignored = base_results.results.ignored as isize; let new_ignored = new_results.results.ignored as isize; let ignored_diff = new_ignored - base_ignored; let base_failed = base_total - base_passed - base_ignored; let new_failed = new_total - new_passed - new_ignored; let failed_diff = new_failed - base_failed; let base_panics = base_results.results.panic as isize; let new_panics = new_results.results.panic as isize; let panic_diff = new_panics - base_panics; let base_conformance = (base_passed as f64 / base_total as f64) * 100_f64; let new_conformance = (new_passed as f64 / new_total as f64) * 100_f64; let conformance_diff = new_conformance - base_conformance; let test_diff = compute_result_diff(Path::new(""), &base_results.results, &new_results.results); if markdown { use num_format::{Locale, ToFormattedString}; /// Generates a proper diff format, with some bold text if things change. 
fn diff_format(diff: isize) -> String { format!( "{}{}{}{}", if diff == 0 { "" } else { "**" }, if diff > 0 { "+" } else { "" }, diff.to_formatted_string(&Locale::en), if diff == 0 { "" } else { "**" } ) } println!("#### VM implementation"); println!("| Test result | main count | PR count | difference |"); println!("| :---------: | :----------: | :------: | :--------: |"); println!( "| Total | {} | {} | {} |", base_total.to_formatted_string(&Locale::en), new_total.to_formatted_string(&Locale::en), diff_format(total_diff), ); println!( "| Passed | {} | {} | {} |", base_passed.to_formatted_string(&Locale::en), new_passed.to_formatted_string(&Locale::en), diff_format(passed_diff), ); println!( "| Ignored | {} | {} | {} |", base_ignored.to_formatted_string(&Locale::en), new_ignored.to_formatted_string(&Locale::en), diff_format(ignored_diff), ); println!( "| Failed | {} | {} | {} |", base_failed.to_formatted_string(&Locale::en), new_failed.to_formatted_string(&Locale::en), diff_format(failed_diff), ); println!( "| Panics | {} | {} | {} |", base_panics.to_formatted_string(&Locale::en), new_panics.to_formatted_string(&Locale::en), diff_format(panic_diff), ); println!( "| Conformance | {:.2}% | {:.2}% | {}{}{:.2}%{} |", base_conformance, new_conformance, if conformance_diff.abs() > f64::EPSILON { "**" } else { "" }, if conformance_diff > 0_f64 { "+" } else { "" }, conformance_diff, if conformance_diff.abs() > f64::EPSILON { "**" } else { "" }, ); if !test_diff.fixed.is_empty() { println!(); println!( "<details><summary><b>Fixed tests ({}):</b></summary>", test_diff.fixed.len() ); println!("\n```"); for test in test_diff.fixed { println!("{test}"); } println!("```"); println!("</details>"); } if !test_diff.broken.is_empty() { println!(); println!( "<details><summary><b>Broken tests ({}):</b></summary>", test_diff.broken.len() ); println!("\n```"); for test in test_diff.broken { println!("{test}"); } println!("```"); println!("</details>"); } if 
!test_diff.new_panics.is_empty() { println!(); println!( "<details><summary><b>New panics ({}):</b></summary>", test_diff.new_panics.len() ); println!("\n```"); for test in test_diff.new_panics { println!("{test}"); } println!("```"); println!("</details>"); } if !test_diff.panic_fixes.is_empty() { println!(); println!( "<details><summary><b>Fixed panics ({}):</b></summary>", test_diff.panic_fixes.len() ); println!("\n```"); for test in test_diff.panic_fixes { println!("{test}"); } println!("```"); println!("</details>"); } } else { println!("Test262 conformance changes:"); println!("| Test result | main | PR | difference |"); println!( "| Passed | {base_passed:^6} | {new_passed:^5} | {:^10} |", base_passed - new_passed ); println!( "| Ignored | {base_ignored:^6} | {new_ignored:^5} | {:^10} |", base_ignored - new_ignored ); println!( "| Failed | {base_failed:^6} | {new_failed:^5} | {:^10} |", base_failed - new_failed, ); println!( "| Panics | {base_panics:^6} | {new_panics:^5} | {:^10} |", base_panics - new_panics ); if !test_diff.fixed.is_empty() { println!(); println!("Fixed tests ({}):", test_diff.fixed.len()); for test in test_diff.fixed { println!("{test}"); } } if !test_diff.broken.is_empty() { println!(); println!("Broken tests ({}):", test_diff.broken.len()); for test in test_diff.broken { println!("{test}"); } } if !test_diff.new_panics.is_empty() { println!(); println!("New panics ({}):", test_diff.new_panics.len()); for test in test_diff.new_panics { println!("{test}"); } } if !test_diff.panic_fixes.is_empty() { println!(); println!("Fixed panics ({}):", test_diff.panic_fixes.len()); for test in test_diff.panic_fixes { println!("{test}"); } } } } /// Test differences. #[derive(Debug, Clone, Default)] struct ResultDiff { fixed: Vec<Box<str>>, broken: Vec<Box<str>>, new_panics: Vec<Box<str>>, panic_fixes: Vec<Box<str>>, } impl ResultDiff { /// Extends the diff with new results. 
fn extend(&mut self, new: Self) { self.fixed.extend(new.fixed); self.broken.extend(new.broken); self.new_panics.extend(new.new_panics); self.panic_fixes.extend(new.panic_fixes); } } /// Compares a base and a new result and returns the list of differences. fn compute_result_diff( base: &Path, base_result: &SuiteResult, new_result: &SuiteResult, ) -> ResultDiff { use super::TestOutcomeResult; let mut final_diff = ResultDiff::default(); for base_test in &base_result.tests { if let Some(new_test) = new_result .tests .iter() .find(|new_test| new_test.strict == base_test.strict && new_test.name == base_test.name) { let test_name = format!( "test/{}/{}.js {}(previously {:?})", base.display(), new_test.name, if base_test.strict { "[strict mode] " } else { "" }, base_test.result ) .into_boxed_str(); #[allow(clippy::match_same_arms)] match (base_test.result, new_test.result) { (a, b) if a == b => {} (TestOutcomeResult::Ignored, TestOutcomeResult::Failed) => {} (_, TestOutcomeResult::Passed) => final_diff.fixed.push(test_name), (TestOutcomeResult::Panic, _) => final_diff.panic_fixes.push(test_name), (_, TestOutcomeResult::Failed) => final_diff.broken.push(test_name), (_, TestOutcomeResult::Panic) => final_diff.new_panics.push(test_name), _ => {} } } } for base_suite in &base_result.suites { if let Some(new_suite) = new_result .suites .iter() .find(|new_suite| new_suite.name == base_suite.name) { let new_base = base.join(new_suite.name.as_ref()); let diff = compute_result_diff(new_base.as_path(), base_suite, new_suite); final_diff.extend(diff); } } final_diff }
31.88024
100
0.539006
1189772ba86a5ea0887d947e22b4b4f9b23b406d
13,165
// Copyright 2020, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use std::{fmt, io, iter::repeat_with, sync::Arc}; use futures::{Sink, SinkExt, Stream, StreamExt}; use tari_common::configuration::Network; use tari_comms::{ connectivity::{ConnectivityEvent, ConnectivityEventTx}, framing, memsocket::MemorySocket, message::MessageExt, peer_manager::PeerFeatures, protocol::{ProtocolEvent, ProtocolNotification, ProtocolNotificationTx}, test_utils::{mocks::create_peer_connection_mock_pair, node_identity::build_node_identity}, Bytes, BytesMut, }; use tari_utilities::ByteArray; use tokio::{ sync::{broadcast, mpsc}, task, }; use crate::{ consensus::ConsensusManager, mempool::{ proto, sync_protocol::{MempoolPeerProtocol, MempoolSyncProtocol, MAX_FRAME_SIZE, MEMPOOL_SYNC_PROTOCOL}, Mempool, }, transactions::{tari_amount::uT, test_helpers::create_tx, transaction_components::Transaction}, validation::mocks::MockValidator, }; pub fn create_transactions(n: usize) -> Vec<Transaction> { repeat_with(|| { let (transaction, _, _) = create_tx(5000 * uT, 3 * uT, 1, 2, 1, 3, Default::default()); transaction }) .take(n) .collect() } async fn new_mempool_with_transactions(n: usize) -> (Mempool, Vec<Transaction>) { let mempool = Mempool::new( Default::default(), ConsensusManager::builder(Network::LocalNet).build(), Box::new(MockValidator::new(true)), ); let transactions = create_transactions(n); for txn in &transactions { mempool.insert(Arc::new(txn.clone())).await.unwrap(); } (mempool, transactions) } async fn setup( num_txns: usize, ) -> ( ProtocolNotificationTx<MemorySocket>, ConnectivityEventTx, Mempool, Vec<Transaction>, ) { let (protocol_notif_tx, protocol_notif_rx) = mpsc::channel(1); let (connectivity_events_tx, connectivity_events_rx) = broadcast::channel(10); let (mempool, transactions) = new_mempool_with_transactions(num_txns).await; let protocol = MempoolSyncProtocol::new( Default::default(), protocol_notif_rx, connectivity_events_rx, mempool.clone(), ); task::spawn(protocol.run()); (protocol_notif_tx, connectivity_events_tx, mempool, 
transactions) } #[tokio::test] async fn empty_set() { let (_, connectivity_events_tx, mempool1, _) = setup(0).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx .send(ConnectivityEvent::PeerConnected(node2_conn)) .unwrap(); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); let (mempool2, _) = new_mempool_with_transactions(0).await; MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); let transactions = mempool2.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); let transactions = mempool1.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); } #[tokio::test] async fn synchronise() { let (_, connectivity_events_tx, mempool1, transactions1) = setup(5).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx .send(ConnectivityEvent::PeerConnected(node2_conn)) .unwrap(); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); let (mempool2, transactions2) = new_mempool_with_transactions(3).await; MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 8); 
assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); let transactions = get_snapshot(&mempool1).await; assert_eq!(transactions.len(), 8); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); } #[tokio::test] async fn duplicate_set() { let (_, connectivity_events_tx, mempool1, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx .send(ConnectivityEvent::PeerConnected(node2_conn)) .unwrap(); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); let (mempool2, transactions2) = new_mempool_with_transactions(1).await; mempool2.insert(Arc::new(transactions1[0].clone())).await.unwrap(); MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); let transactions = get_snapshot(&mempool1).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); } #[tokio::test] async fn responder() { let (protocol_notif, _, _, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (sock_in, sock_out) = 
MemorySocket::new_pair(); protocol_notif .send(ProtocolNotification::new( MEMPOOL_SYNC_PROTOCOL.clone(), ProtocolEvent::NewInboundSubstream(node1.node_id().clone(), sock_in), )) .await .unwrap(); let (mempool2, transactions2) = new_mempool_with_transactions(1).await; mempool2.insert(Arc::new(transactions1[0].clone())).await.unwrap(); let framed = framing::canonical(sock_out, MAX_FRAME_SIZE); MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_initiator() .await .unwrap(); let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); // We cannot be sure that the mempool1 contains all the transactions at this point because the initiator protocol // can complete before the responder has inserted the final transaction. There is currently no mechanism to know // this. } #[tokio::test] async fn initiator_messages() { let (protocol_notif, _, _, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (sock_in, sock_out) = MemorySocket::new_pair(); protocol_notif .send(ProtocolNotification::new( MEMPOOL_SYNC_PROTOCOL.clone(), ProtocolEvent::NewInboundSubstream(node1.node_id().clone(), sock_in), )) .await .unwrap(); let mut transactions = create_transactions(2); transactions.push(transactions1[0].clone()); let mut framed = framing::canonical(sock_out, MAX_FRAME_SIZE); // As the initiator, send an inventory let inventory = proto::TransactionInventory { items: transactions .iter() .map(|tx| tx.first_kernel_excess_sig().unwrap().get_signature().to_vec()) .collect(), }; write_message(&mut framed, inventory).await; // Expect 1 transaction, a "stop message" and indexes for missing transactions let transaction: proto::TransactionItem = read_message(&mut framed).await; assert!(transaction.transaction.is_some()); let stop: 
proto::TransactionItem = read_message(&mut framed).await; assert!(stop.transaction.is_none()); let indexes: proto::InventoryIndexes = read_message(&mut framed).await; assert_eq!(indexes.indexes, [0, 1]); } #[tokio::test] async fn responder_messages() { let (_, connectivity_events_tx, _, transactions1) = setup(1).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx .send(ConnectivityEvent::PeerConnected(node2_conn)) .unwrap(); let substream = node1_mock.next_incoming_substream().await.unwrap(); let mut framed = framing::canonical(substream, MAX_FRAME_SIZE); // Expect an inventory let inventory: proto::TransactionInventory = read_message(&mut framed).await; assert_eq!(inventory.items.len(), 1); // Send no transactions back let nothing = proto::TransactionItem::empty(); write_message(&mut framed, nothing).await; // Send transaction indexes back let indexes = proto::InventoryIndexes { indexes: vec![0] }; write_message(&mut framed, indexes).await; // Expect a single transaction back and a stop message let transaction: proto::TransactionItem = read_message(&mut framed).await; assert_eq!( transaction .transaction .unwrap() .body .unwrap() .kernels .remove(0) .excess_sig .unwrap() .signature, transactions1[0] .first_kernel_excess_sig() .unwrap() .get_signature() .to_vec() ); let stop: proto::TransactionItem = read_message(&mut framed).await; assert!(stop.transaction.is_none()); // Except stream to end assert!(framed.next().await.is_none()); } async fn get_snapshot(mempool: &Mempool) -> Vec<Transaction> { mempool .snapshot() .await .unwrap() .iter() .map(|t| &**t) .cloned() .collect() } async fn read_message<S, T>(reader: &mut S) -> T where S: Stream<Item = io::Result<BytesMut>> + 
Unpin, T: prost::Message + Default, { let msg = reader.next().await.unwrap().unwrap(); T::decode(&mut msg.freeze()).unwrap() } async fn write_message<S, T>(writer: &mut S, message: T) where S: Sink<Bytes> + Unpin, S::Error: fmt::Debug, T: prost::Message, { writer.send(message.to_encoded_bytes().into()).await.unwrap(); }
37.722063
119
0.690771
7a22c3a100131fc7c6651877ea80f277aab09b37
541
// if1.rs pub fn bigger(a: i32, b: i32) -> i32 { // Complete this function to return the bigger number! // Do not use: // - another function call // - additional variables // Execute `rustlings hint if1` for hints if (a > b) { a} else{ b} } // Don't mind this for now :) #[cfg(test)] mod tests { use super::*; #[test] fn ten_is_bigger_than_eight() { assert_eq!(10, bigger(10, 8)); } #[test] fn fortytwo_is_bigger_than_thirtytwo() { assert_eq!(42, bigger(32, 42)); } }
20.037037
58
0.569316
795af4a64c7d0a9424ae1c9c2d9268a5881a5711
2,265
use std::fmt; use crate::Status; /// Trait alias for types that programmatically specify the status. /// /// For prototyping, see [`Unkind`]. /// /// # Example /// /// ```rust /// use status::Kind; /// /// #[derive(Copy, Clone, Debug, derive_more::Display)] /// enum ErrorKind { /// #[display(fmt = "Failed to read file")] /// Read, /// #[display(fmt = "Failed to parse")] /// Parse, /// } /// type Status = status::Status<ErrorKind>; /// type Result<T, E = Status> = std::result::Result<T, E>; /// /// fn read_file() -> Result<()> { /// return ErrorKind::Read.into_err(); /// } /// ``` pub trait Kind: Copy + Clone + fmt::Display + fmt::Debug + Send + Sync + 'static { /// Convenience for creating an error. fn into_status<C: crate::Context>(self) -> Status<Self, C> { Status::new(self) } /// Convenience for returning an error. fn into_err<T, C: crate::Context>(self) -> Result<T, Status<Self, C>> { Err(Status::new(self)) } } impl<U> Kind for U where U: Copy + Clone + fmt::Display + fmt::Debug + Send + Sync + 'static {} /// Adhoc [`Kind`]. /// /// Unlike most [`Kind`]s, this is meant to be opaque and not programmatically specify the status. /// It is only good for displaying a `str` to the user when prototyping before one transitions to more formal [`Kind`]s. /// /// Note: This is the default [`Kind`] for [`Status`]. 
///
/// When transitioning to a more useful [`Kind`], it could be helpful to have an `enum` variant
/// with an `Unkind`:
/// ```
/// #[derive(Copy, Clone, Debug, derive_more::Display)]
/// enum ErrorKind {
///     #[display(fmt = "Failed to read file")]
///     Read,
///     #[display(fmt = "Failed to parse")]
///     Parse,
///     #[display(fmt = "{}", "_0")]
///     Other(status::Unkind),
/// }
/// ```
#[derive(Copy, Clone, Debug)]
pub struct Unkind {
    // `&'static str` keeps the type `Copy`, as required by the `Kind` bound.
    inner: &'static str,
}

impl From<&'static str> for Unkind {
    fn from(s: &'static str) -> Self {
        Self { inner: s }
    }
}

impl fmt::Display for Unkind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Use `write!`, not `writeln!`: `Display` output must not carry a
        // trailing newline, otherwise embedding an `Unkind` inside another
        // `Display` impl (e.g. the `Other(status::Unkind)` variant shown in
        // the doc example above) injects a line break mid-message.
        write!(f, "{}", self.inner)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use static_assertions::*;

    #[test]
    fn unkind() {
        assert_impl_all!(Unkind: Kind);
    }
}
25.166667
120
0.580574
500fe5963cbcef8b4a6a374dc90aa7047957eb49
19,463
//! Optimizers used to train the neural network. use arrayfire::*; use std::str::FromStr; use crate::errors::Error; use crate::io::save_vec_tensor; use crate::layers::Layer; use crate::tensor::*; /// Defines the trait that needs to be implemented by any optimizer working with neuro. pub trait Optimizer { fn name(&self) -> &str; fn update_parameters(&mut self, layer: &mut dyn Layer, layer_idx: usize); fn update_time_step(&mut self) {} fn initialize_parameters(&mut self, layers_dims: Vec<(Dim, Dim)>); fn save(&self, file: &hdf5::File) -> Result<(), Error>; } /// Stochastic Gradient Descent pub struct SGD { learning_rate: PrimitiveType, momentum: PrimitiveType, first_moment_est: [Vec<Tensor>; 2], } impl SGD { pub(crate) const NAME: &'static str = "SGD"; /// Creates a Stochastic Gradient Descent optimizer. pub fn new(learning_rate: PrimitiveType) -> Box<SGD> { Box::new(SGD { learning_rate, momentum: 0.0, first_moment_est: Default::default(), }) } /// Creates a Stochastic Gradient Descent optimizer with momentum estimation. 
pub fn with_param(learning_rate: PrimitiveType, momentum: PrimitiveType) -> Box<SGD> { Box::new(SGD { learning_rate, momentum, first_moment_est: Default::default(), }) } pub(crate) fn from_hdf5_group(group: &hdf5::Group) -> Box<SGD> { let learning_rate = group.dataset("learning_rate").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the learning rate."); let momentum = group.dataset("momentum").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the momentum."); let first_moment_est_0 = group.dataset("first_moment_est_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_0."); let first_moment_est_1 = group.dataset("first_moment_est_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_1."); Box::new(SGD { learning_rate: learning_rate[0], momentum: momentum[0], first_moment_est: [first_moment_est_0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), first_moment_est_1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], }) } } impl Optimizer for SGD { fn name(&self) -> &str { Self::NAME } fn update_parameters(&mut self, layer: &mut dyn Layer, layer_idx: usize ) { if let Some((mut param, dparam)) = layer.parameters_mut() { for i in 0..param.len() { self.first_moment_est[i][layer_idx] = &self.first_moment_est[i][layer_idx] * self.momentum + dparam[i] * (1. 
- self.momentum); self.first_moment_est[i][layer_idx].eval(); *param[i] -= &self.first_moment_est[i][layer_idx] * self.learning_rate; } } } fn initialize_parameters(&mut self, layers_dims: Vec<(Dim, Dim)>) { for dim in layers_dims { self.first_moment_est[0].push(Tensor::zeros(dim.0)); self.first_moment_est[1].push(Tensor::zeros(dim.1)); } } fn save(&self, file: &hdf5::File) -> Result<(), Error> { let optimizer = file.create_group("optimizer")?; let opt_type = optimizer.new_dataset::<hdf5::types::VarLenUnicode>().create("type", 1)?; opt_type.write(&[hdf5::types::VarLenUnicode::from_str(Self::NAME).unwrap()])?; let learning_rate = optimizer.new_dataset::<PrimitiveType>().create("learning_rate", 1)?; learning_rate.write(&[self.learning_rate])?; let momentum = optimizer.new_dataset::<PrimitiveType>().create("momentum", 1)?; momentum.write(&[self.momentum])?; save_vec_tensor(&optimizer, &self.first_moment_est[0], "first_moment_est_0")?; save_vec_tensor(&optimizer, &self.first_moment_est[1], "first_moment_est_1")?; Ok(()) } } /// Adaptive moments estimation pub struct Adam { learning_rate: PrimitiveType, beta1: PrimitiveType, beta2: PrimitiveType, eps: PrimitiveType, time_step: i32, first_moment_est: [Vec<Tensor>; 2], second_moment_est: [Vec<Tensor>; 2], } impl Adam { pub(crate) const NAME: &'static str = "Adam"; /// Creates an Adam optimizer. /// /// The exponential decay rates for the first and second moment estimates are set to 0.9 and 0.999 respectively. /// The epsilon value used for numerical stability is 1e-8. /// pub fn new(learning_rate: PrimitiveType) -> Box<Adam> { Box::new(Adam { learning_rate, beta1: 0.9, beta2: 0.999, eps: 1e-8, time_step: 0, first_moment_est: Default::default(), second_moment_est: Default::default(), }) } /// Creates an Adam optimizer with the given parameters. /// /// # Arguments /// * `learning_rate` - learning rate used to update the parameters of the layers. /// * `beta1` - exponential decay rate for the first moment estimate. 
/// * `beta2` - exponential decay rate for the second moment estimate. /// * `eps` - small constant used for numerical stability. /// pub fn with_param(learning_rate: PrimitiveType, beta1: PrimitiveType, beta2: PrimitiveType, eps: PrimitiveType ) -> Box<Adam> { Box::new(Adam { learning_rate, beta1, beta2, eps, time_step: 0, first_moment_est: Default::default(), second_moment_est: Default::default(), }) } pub(crate) fn from_hdf5_group(group: &hdf5::Group) -> Box<Adam> { let learning_rate = group.dataset("learning_rate").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the learning rate."); let beta1 = group.dataset("beta1").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve beta1."); let beta2 = group.dataset("beta2").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve beta2."); let eps = group.dataset("eps").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the epsilon value."); let time_step = group.dataset("time_step").and_then(|ds| ds.read_raw::<i32>()).expect("Could not retrieve the time step."); let first_moment_est_0 = group.dataset("first_moment_est_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_0."); let first_moment_est_1 = group.dataset("first_moment_est_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_1."); let second_moment_est_0 = group.dataset("second_moment_est_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve second_moment_est_0."); let second_moment_est_1 = group.dataset("second_moment_est_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve second_moment_est_1."); Box::new(Adam { learning_rate: learning_rate[0], beta1: beta1[0], beta2: beta2[0], eps: eps[0], time_step: time_step[0], first_moment_est: [first_moment_est_0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), 
first_moment_est_1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], second_moment_est: [second_moment_est_0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), second_moment_est_1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], }) } } impl Optimizer for Adam { fn name(&self) -> &str { Self::NAME } fn update_parameters(&mut self, layer: &mut dyn Layer, layer_idx: usize ) { if let Some((mut param, dparam)) = layer.parameters_mut() { for i in 0..param.len() { // Update the biased first and second moment estimates self.first_moment_est[i][layer_idx] = &self.first_moment_est[i][layer_idx] * self.beta1 + dparam[i] * (1. - self.beta1); self.second_moment_est[i][layer_idx] = &self.second_moment_est[i][layer_idx] * self.beta2 + &(dparam[i] * dparam[i]) * (1. - self.beta2); self.first_moment_est[i][layer_idx].eval(); self.second_moment_est[i][layer_idx].eval(); // Correct both estimates let first_moment_est_corr = &self.first_moment_est[i][layer_idx] / (1. - self.beta1.powi(self.time_step)); let second_moment_est_corr = &self.second_moment_est[i][layer_idx] / (1. 
- self.beta2.powi(self.time_step)); // Update the parameter *param[i] -= &first_moment_est_corr / (&sqrt(&second_moment_est_corr) + self.eps) * self.learning_rate; } } } fn update_time_step(&mut self) { self.time_step += 1; } fn initialize_parameters(&mut self, layers_dims: Vec<(Dim, Dim)>) { for dim in layers_dims { self.first_moment_est[0].push(Tensor::zeros(dim.0)); self.second_moment_est[0].push(Tensor::zeros(dim.0)); self.first_moment_est[1].push(Tensor::zeros(dim.1)); self.second_moment_est[1].push(Tensor::zeros(dim.1)); } } fn save(&self, file: &hdf5::File) -> Result<(), Error> { let optimizer = file.create_group("optimizer")?; let opt_type = optimizer.new_dataset::<hdf5::types::VarLenUnicode>().create("type", 1)?; opt_type.write(&[hdf5::types::VarLenUnicode::from_str(Self::NAME).unwrap()])?; let learning_rate = optimizer.new_dataset::<PrimitiveType>().create("learning_rate", 1)?; learning_rate.write(&[self.learning_rate])?; let beta1 = optimizer.new_dataset::<PrimitiveType>().create("beta1", 1)?; beta1.write(&[self.beta1])?; let beta2 = optimizer.new_dataset::<PrimitiveType>().create("beta2", 1)?; beta2.write(&[self.beta2])?; let eps = optimizer.new_dataset::<PrimitiveType>().create("eps", 1)?; eps.write(&[self.eps])?; let time_step = optimizer.new_dataset::<PrimitiveType>().create("time_step", 1)?; time_step.write(&[self.time_step])?; save_vec_tensor(&optimizer, &self.first_moment_est[0], "first_moment_est_0")?; save_vec_tensor(&optimizer, &self.first_moment_est[1], "first_moment_est_1")?; save_vec_tensor(&optimizer, &self.second_moment_est[0], "second_moment_est_0")?; save_vec_tensor(&optimizer, &self.second_moment_est[1], "second_moment_est_1")?; Ok(()) } } /// RMSProp pub struct RMSProp { learning_rate: PrimitiveType, decay_rate: PrimitiveType, eps: PrimitiveType, first_moment_est: [Vec<Tensor>; 2], } impl RMSProp { pub(crate) const NAME: &'static str = "RMSProp"; /// Creates an RMSProp optimizer. 
/// /// The exponential decay rate for the first moment estimate is set to 0.9 and the epsilon value used for /// numerical stability to 1e-8. /// pub fn new(learning_rate: PrimitiveType) -> Box<RMSProp> { Box::new(RMSProp { learning_rate, decay_rate: 0.9, eps: 1e-8, first_moment_est: Default::default(), }) } /// Creates an RMSProp optimizer with the given parameters. pub fn with_param(learning_rate: PrimitiveType, decay_rate: PrimitiveType, eps: PrimitiveType ) -> Box<RMSProp> { Box::new(RMSProp { learning_rate, decay_rate, eps, first_moment_est: Default::default(), }) } pub(crate) fn from_hdf5_group(group: &hdf5::Group) -> Box<RMSProp> { let learning_rate = group.dataset("learning_rate").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the learning rate."); let decay_rate = group.dataset("decay_rate").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the decay rate."); let eps = group.dataset("eps").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the epsilon value."); let first_moment_est_0 = group.dataset("first_moment_est_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_0."); let first_moment_est_1 = group.dataset("first_moment_est_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve first_moment_est_1."); Box::new(RMSProp { learning_rate: learning_rate[0], decay_rate: decay_rate[0], eps: eps[0], first_moment_est: [first_moment_est_0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), first_moment_est_1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], }) } } impl Optimizer for RMSProp { fn name(&self) -> &str { Self::NAME } fn update_parameters(&mut self, layer: &mut dyn Layer, layer_idx: usize ) { if let Some((mut param, dparam)) = layer.parameters_mut() { for i in 0..param.len() { self.first_moment_est[i][layer_idx] = &self.first_moment_est[i][layer_idx] * self.decay_rate + &(dparam[i] * dparam[i]) * (1. 
- self.decay_rate); self.first_moment_est[i][layer_idx].eval(); *param[i] -= dparam[i] / (&sqrt(&self.first_moment_est[i][layer_idx]) + self.eps) * self.learning_rate; } } } fn initialize_parameters(&mut self, layers_dims: Vec<(Dim, Dim)>) { for dim in layers_dims { self.first_moment_est[0].push(Tensor::zeros(dim.0)); self.first_moment_est[1].push(Tensor::zeros(dim.1)); } } fn save(&self, file: &hdf5::File) -> Result<(), Error> { let optimizer = file.create_group("optimizer")?; let opt_type = optimizer.new_dataset::<hdf5::types::VarLenUnicode>().create("type", 1)?; opt_type.write(&[hdf5::types::VarLenUnicode::from_str(Self::NAME).unwrap()])?; let learning_rate = optimizer.new_dataset::<PrimitiveType>().create("learning_rate", 1)?; learning_rate.write(&[self.learning_rate])?; let decay_rate = optimizer.new_dataset::<PrimitiveType>().create("decay_rate", 1)?; decay_rate.write(&[self.decay_rate])?; let eps = optimizer.new_dataset::<PrimitiveType>().create("eps", 1)?; eps.write(&[self.eps])?; save_vec_tensor(&optimizer, &self.first_moment_est[0], "first_moment_est_0")?; save_vec_tensor(&optimizer, &self.first_moment_est[1], "first_moment_est_1")?; Ok(()) } } /// AdaDelta #[derive(Default)] pub struct AdaDelta { decay_rate: PrimitiveType, eps: PrimitiveType, grad_acc: [Vec<Tensor>; 2], updates_acc: [Vec<Tensor>; 2], } impl AdaDelta { pub(crate) const NAME: &'static str = "AdaDelta"; /// Creates an AdaDelta optimizer. /// /// The exponential decay rate is set to 0.95 and the epsilon value used for numerical stability to 1e-6. /// pub fn new() -> Box<AdaDelta> { Box::new(AdaDelta { decay_rate: 0.95, eps: 1e-6, grad_acc: Default::default(), updates_acc: Default::default(), }) } /// Creates an AdaDelta optimizer with the parameters. 
pub fn with_param(decay_rate: PrimitiveType, eps: PrimitiveType) -> Box<AdaDelta> { Box::new(AdaDelta { decay_rate, eps, grad_acc: Default::default(), updates_acc: Default::default(), }) } pub(crate) fn from_hdf5_group(group: &hdf5::Group) -> Box<AdaDelta> { let decay_rate = group.dataset("decay_rate").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the decay rate."); let eps = group.dataset("eps").and_then(|ds| ds.read_raw::<PrimitiveType>()).expect("Could not retrieve the epsilon value."); let gradacc0 = group.dataset("grad_acc_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve grad_acc_0."); let gradacc1 = group.dataset("grad_acc_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve grad_acc_1."); let updatesacc0 = group.dataset("updates_acc_0").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve updates_acc_0."); let updatesacc1 = group.dataset("updates_acc_1").and_then(|ds| ds.read_raw::<H5Tensor>()).expect("Could not retrieve updates_acc_1."); Box::new(AdaDelta { decay_rate: decay_rate[0], eps: eps[0], grad_acc: [gradacc0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), gradacc1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], updates_acc: [updatesacc0.iter().map(Tensor::from).collect::<Vec<Tensor>>(), updatesacc1.iter().map(Tensor::from).collect::<Vec<Tensor>>()], }) } } impl Optimizer for AdaDelta { fn name(&self) -> &str { Self::NAME } fn update_parameters(&mut self, layer: &mut dyn Layer, layer_idx: usize ) { if let Some((mut param, dparam)) = layer.parameters_mut() { for i in 0..param.len() { // Accumulate gradients self.grad_acc[i][layer_idx] = &self.grad_acc[i][layer_idx] * self.decay_rate + &(dparam[i] * dparam[i]) * (1. 
- self.decay_rate); // Compute update let update = - sqrt(&(&self.updates_acc[i][layer_idx] + self.eps)) / sqrt(&(&self.grad_acc[i][layer_idx] + self.eps)) * dparam[i]; // Accumulate updates self.updates_acc[i][layer_idx] = &self.updates_acc[i][layer_idx] * self.decay_rate + &(&update * &update) * (1. - self.decay_rate); self.grad_acc[i][layer_idx].eval(); self.updates_acc[i][layer_idx].eval(); // Apply update *param[i] += update; } } } fn initialize_parameters(&mut self, layers_dims: Vec<(Dim, Dim)>) { for dim in layers_dims { self.grad_acc[0].push(Tensor::zeros(dim.0)); self.updates_acc[0].push(Tensor::zeros(dim.0)); self.grad_acc[1].push(Tensor::zeros(dim.1)); self.updates_acc[1].push(Tensor::zeros(dim.1)); } } fn save(&self, file: &hdf5::File) -> Result<(), Error> { let optimizer = file.create_group("optimizer")?; let opt_type = optimizer.new_dataset::<hdf5::types::VarLenUnicode>().create("type", 1)?; opt_type.write(&[hdf5::types::VarLenUnicode::from_str(Self::NAME).unwrap()])?; let decay_rate = optimizer.new_dataset::<PrimitiveType>().create("decay_rate", 1)?; decay_rate.write(&[self.decay_rate])?; let eps = optimizer.new_dataset::<PrimitiveType>().create("eps", 1)?; eps.write(&[self.eps])?; save_vec_tensor(&optimizer, &self.grad_acc[0], "grad_acc_0")?; save_vec_tensor(&optimizer, &self.grad_acc[1], "grad_acc_1")?; save_vec_tensor(&optimizer, &self.updates_acc[0], "updates_acc_0")?; save_vec_tensor(&optimizer, &self.updates_acc[1], "updates_acc_1")?; Ok(()) } }
40.717573
174
0.610697
f84d0799ea4ada086cbf9871d64f02f61eee1b60
165,318
//! Persistent accounts are stored in below path location: //! <path>/<pid>/data/ //! //! The persistent store would allow for this mode of operation: //! - Concurrent single thread append with many concurrent readers. //! //! The underlying memory is memory mapped to a file. The accounts would be //! stored across multiple files and the mappings of file and offset of a //! particular account would be stored in a shared index. This will allow for //! concurrent commits without blocking reads, which will sequentially write //! to memory, ssd or disk, and should be as fast as the hardware allow for. //! The only required in memory data structure with a write lock is the index, //! which should be fast to update. //! //! AppendVec's only store accounts for single slots. To bootstrap the //! index from a persistent store of AppendVec's, the entries include //! a "write_version". A single global atomic `AccountsDB::write_version` //! tracks the number of commits to the entire data store. So the latest //! commit for each slot entry would be indexed. 
use crate::{ accounts_index::{AccountsIndex, Ancestors, SlotList, SlotSlice}, append_vec::{AppendVec, StoredAccount, StoredMeta}, }; use blake3::traits::digest::Digest; use lazy_static::lazy_static; use log::*; use rand::{thread_rng, Rng}; use rayon::{prelude::*, ThreadPool}; use serde::{Deserialize, Serialize}; use solana_measure::measure::Measure; use solana_rayon_threadlimit::get_thread_count; use solana_sdk::{ account::Account, clock::{Epoch, Slot}, genesis_config::ClusterType, hash::{Hash, Hasher}, pubkey::Pubkey, }; use std::convert::TryFrom; use std::{ collections::{HashMap, HashSet}, convert::TryInto, io::{Error as IOError, Result as IOResult}, iter::FromIterator, ops::RangeBounds, path::{Path, PathBuf}, sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, time::Instant, }; use tempfile::TempDir; const PAGE_SIZE: u64 = 4 * 1024; pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024; pub const DEFAULT_NUM_THREADS: u32 = 8; pub const DEFAULT_NUM_DIRS: u32 = 4; lazy_static! 
{ // FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDB panic has occurred, // as |cargo test| cannot observe panics in other threads pub static ref FROZEN_ACCOUNT_PANIC: Arc<AtomicBool> = Arc::new(AtomicBool::new(false)); } #[derive(Debug, Default)] pub struct ErrorCounters { pub total: usize, pub account_in_use: usize, pub account_loaded_twice: usize, pub account_not_found: usize, pub blockhash_not_found: usize, pub blockhash_too_old: usize, pub call_chain_too_deep: usize, pub duplicate_signature: usize, pub instruction_error: usize, pub insufficient_funds: usize, pub invalid_account_for_fee: usize, pub invalid_account_index: usize, pub invalid_program_for_execution: usize, pub not_allowed_during_cluster_maintenance: usize, } #[derive(Default, Debug, PartialEq, Clone)] pub struct AccountInfo { /// index identifying the append storage store_id: AppendVecId, /// offset into the storage offset: usize, /// lamports in the account used when squashing kept for optimization /// purposes to remove accounts with zero balance. lamports: u64, } /// An offset into the AccountsDB::storage vector pub type AppendVecId = usize; pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>; pub type SnapshotStorages = Vec<SnapshotStorage>; // Each slot has a set of storage entries. pub(crate) type SlotStores = HashMap<usize, Arc<AccountStorageEntry>>; trait Versioned { fn version(&self) -> u64; } impl Versioned for (u64, Hash) { fn version(&self) -> u64 { self.0 } } impl Versioned for (u64, AccountInfo) { fn version(&self) -> u64 { self.0 } } #[derive(Clone, Default, Debug)] pub struct AccountStorage(pub HashMap<Slot, SlotStores>); impl AccountStorage { fn scan_accounts(&self, account_info: &AccountInfo, slot: Slot) -> Option<(Account, Slot)> { self.0 .get(&slot) .and_then(|storage_map| storage_map.get(&account_info.store_id)) .and_then(|store| { Some( store .accounts .get_account(account_info.offset)? 
.0 .clone_account(), ) }) .map(|account| (account, slot)) } } #[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)] pub enum AccountStorageStatus { Available = 0, Full = 1, Candidate = 2, } impl Default for AccountStorageStatus { fn default() -> Self { Self::Available } } #[derive(Debug)] pub enum BankHashVerificationError { MismatchedAccountHash, MismatchedBankHash, MissingBankHash, MismatchedTotalLamports(u64, u64), } /// Persistent storage structure holding the accounts #[derive(Debug)] pub struct AccountStorageEntry { pub(crate) id: AppendVecId, pub(crate) slot: Slot, /// storage holding the accounts pub(crate) accounts: AppendVec, /// Keeps track of the number of accounts stored in a specific AppendVec. /// This is periodically checked to reuse the stores that do not have /// any accounts in it /// status corresponding to the storage, lets us know that /// the append_vec, once maxed out, then emptied, can be reclaimed count_and_status: RwLock<(usize, AccountStorageStatus)>, /// This is the total number of accounts stored ever since initialized to keep /// track of lifetime count of all store operations. And this differs from /// count_and_status in that this field won't be decremented. /// /// This is used as a rough estimate for slot shrinking. As such a relaxed /// use case, this value ARE NOT strictly synchronized with count_and_status! 
approx_store_count: AtomicUsize, } impl AccountStorageEntry { pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self { let tail = AppendVec::new_relative_path(slot, id); let path = Path::new(path).join(&tail); let accounts = AppendVec::new(&path, true, file_size as usize); Self { id, slot, accounts, count_and_status: RwLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(0), } } pub(crate) fn new_empty_map(id: AppendVecId, accounts_current_len: usize) -> Self { Self { id, slot: 0, accounts: AppendVec::new_empty_map(accounts_current_len), count_and_status: RwLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(0), } } pub fn set_status(&self, mut status: AccountStorageStatus) { let mut count_and_status = self.count_and_status.write().unwrap(); let count = count_and_status.0; if status == AccountStorageStatus::Full && count == 0 { // this case arises when the append_vec is full (store_ptrs fails), // but all accounts have already been removed from the storage // // the only time it's safe to call reset() on an append_vec is when // every account has been removed // **and** // the append_vec has previously been completely full // self.accounts.reset(); status = AccountStorageStatus::Available; } *count_and_status = (count, status); } pub fn status(&self) -> AccountStorageStatus { self.count_and_status.read().unwrap().1 } pub fn count(&self) -> usize { self.count_and_status.read().unwrap().0 } pub fn approx_stored_count(&self) -> usize { self.approx_store_count.load(Ordering::Relaxed) } pub fn has_accounts(&self) -> bool { self.count() > 0 } pub fn slot(&self) -> Slot { self.slot } pub fn append_vec_id(&self) -> AppendVecId { self.id } pub fn flush(&self) -> Result<(), IOError> { self.accounts.flush() } fn add_account(&self) { let mut count_and_status = self.count_and_status.write().unwrap(); *count_and_status = (count_and_status.0 + 1, count_and_status.1); 
self.approx_store_count.fetch_add(1, Ordering::Relaxed); } fn try_available(&self) -> bool { let mut count_and_status = self.count_and_status.write().unwrap(); let (count, status) = *count_and_status; if status == AccountStorageStatus::Available { *count_and_status = (count, AccountStorageStatus::Candidate); true } else { false } } fn remove_account(&self) -> usize { let mut count_and_status = self.count_and_status.write().unwrap(); let (mut count, mut status) = *count_and_status; if count == 1 && status == AccountStorageStatus::Full { // this case arises when we remove the last account from the // storage, but we've learned from previous write attempts that // the storage is full // // the only time it's safe to call reset() on an append_vec is when // every account has been removed // **and** // the append_vec has previously been completely full // // otherwise, the storage may be in flight with a store() // call self.accounts.reset(); status = AccountStorageStatus::Available; } // Some code path is removing accounts too many; this may result in an // unintended reveal of old state for unrelated accounts. 
assert!( count > 0, "double remove of account in slot: {}/store: {}!!", self.slot, self.id ); count -= 1; *count_and_status = (count, status); count } pub fn set_file<P: AsRef<Path>>(&mut self, path: P) -> IOResult<()> { self.accounts.set_file(path) } pub fn get_relative_path(&self) -> Option<PathBuf> { AppendVec::get_relative_path(self.accounts.get_path()) } pub fn get_path(&self) -> PathBuf { self.accounts.get_path() } } pub fn get_temp_accounts_paths(count: u32) -> IOResult<(Vec<TempDir>, Vec<PathBuf>)> { let temp_dirs: IOResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect(); let temp_dirs = temp_dirs?; let paths: Vec<PathBuf> = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect(); Ok((temp_dirs, paths)) } #[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)] pub struct BankHashStats { pub num_updated_accounts: u64, pub num_removed_accounts: u64, pub num_lamports_stored: u64, pub total_data_len: u64, pub num_executable_accounts: u64, } impl BankHashStats { pub fn update(&mut self, account: &Account) { if account.lamports == 0 { self.num_removed_accounts += 1; } else { self.num_updated_accounts += 1; } self.total_data_len = self.total_data_len.wrapping_add(account.data.len() as u64); if account.executable { self.num_executable_accounts += 1; } self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports); } pub fn merge(&mut self, other: &BankHashStats) { self.num_updated_accounts += other.num_updated_accounts; self.num_removed_accounts += other.num_removed_accounts; self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len); self.num_lamports_stored = self .num_lamports_stored .wrapping_add(other.num_lamports_stored); self.num_executable_accounts += other.num_executable_accounts; } } #[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)] pub struct BankHashInfo { pub hash: Hash, pub snapshot_hash: Hash, pub stats: BankHashStats, } #[derive(Debug)] struct 
FrozenAccountInfo { pub hash: Hash, // Hash generated by hash_frozen_account_data() pub lamports: u64, // Account balance cannot be lower than this amount } // This structure handles the load/store of the accounts #[derive(Debug)] pub struct AccountsDB { /// Keeps tracks of index into AppendVec on a per slot basis pub accounts_index: RwLock<AccountsIndex<AccountInfo>>, pub storage: RwLock<AccountStorage>, /// distribute the accounts across storage lists pub next_id: AtomicUsize, pub shrink_candidate_slots: Mutex<Vec<Slot>>, pub(crate) write_version: AtomicU64, /// Set of storage paths to pick from pub(crate) paths: Vec<PathBuf>, /// Directory of paths this accounts_db needs to hold/remove temp_paths: Option<Vec<TempDir>>, /// Starting file size of appendvecs file_size: u64, /// Accounts that will cause a panic! if data modified or lamports decrease frozen_accounts: HashMap<Pubkey, FrozenAccountInfo>, /// Thread pool used for par_iter pub thread_pool: ThreadPool, pub thread_pool_clean: ThreadPool, /// Number of append vecs to create to maximize parallelism when scanning /// the accounts min_num_stores: usize, pub bank_hashes: RwLock<HashMap<Slot, BankHashInfo>>, dead_slots: RwLock<HashSet<Slot>>, stats: AccountsStats, pub cluster_type: Option<ClusterType>, } #[derive(Debug, Default)] struct AccountsStats { delta_hash_scan_time_total_us: AtomicU64, delta_hash_accumulate_time_total_us: AtomicU64, delta_hash_merge_time_total_us: AtomicU64, delta_hash_num: AtomicU64, } fn make_min_priority_thread_pool() -> ThreadPool { // Use lower thread count to reduce priority. 
let num_threads = std::cmp::max(2, num_cpus::get() / 4); rayon::ThreadPoolBuilder::new() .thread_name(|i| format!("solana-accounts-cleanup-{}", i)) .num_threads(num_threads) .build() .unwrap() } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION))] impl solana_sdk::abi_example::AbiExample for AccountsDB { fn example() -> Self { let accounts_db = AccountsDB::new_single(); let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; let account = Account::new(1, some_data_len, &key); accounts_db.store(some_slot, &[(&key, &account)]); accounts_db.add_root(0); accounts_db } } impl Default for AccountsDB { fn default() -> Self { let num_threads = get_thread_count(); let mut bank_hashes = HashMap::new(); bank_hashes.insert(0, BankHashInfo::default()); AccountsDB { accounts_index: RwLock::new(AccountsIndex::default()), storage: RwLock::new(AccountStorage(HashMap::new())), next_id: AtomicUsize::new(0), shrink_candidate_slots: Mutex::new(Vec::new()), write_version: AtomicU64::new(0), paths: vec![], temp_paths: None, file_size: DEFAULT_FILE_SIZE, thread_pool: rayon::ThreadPoolBuilder::new() .num_threads(num_threads) .thread_name(|i| format!("solana-accounts-db-{}", i)) .build() .unwrap(), thread_pool_clean: make_min_priority_thread_pool(), min_num_stores: num_threads, bank_hashes: RwLock::new(bank_hashes), frozen_accounts: HashMap::new(), dead_slots: RwLock::new(HashSet::new()), stats: AccountsStats::default(), cluster_type: None, } } } impl AccountsDB { pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self { let new = if !paths.is_empty() { Self { paths, temp_paths: None, cluster_type: Some(*cluster_type), ..Self::default() } } else { // Create a temporary set of accounts directories, used primarily // for testing let (temp_dirs, paths) = get_temp_accounts_paths(DEFAULT_NUM_DIRS).unwrap(); Self { paths, temp_paths: Some(temp_dirs), cluster_type: Some(*cluster_type), ..Self::default() } }; { for path in new.paths.iter() { 
std::fs::create_dir_all(path).expect("Create directory failed."); } } new } #[cfg(test)] pub fn new_single() -> Self { AccountsDB { min_num_stores: 0, ..AccountsDB::new(Vec::new(), &ClusterType::Development) } } #[cfg(test)] pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self { AccountsDB { file_size, ..AccountsDB::new(paths, &ClusterType::Development) } } fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry { AccountStorageEntry::new( path, slot, self.next_id.fetch_add(1, Ordering::Relaxed), size, ) } // Reclaim older states of rooted non-zero lamport accounts as a general // AccountsDB bloat mitigation and preprocess for better zero-lamport purging. fn clean_old_rooted_accounts(&self, purges_in_root: Vec<Pubkey>) { // This number isn't carefully chosen; just guessed randomly such that // the hot loop will be the order of ~Xms. const INDEX_CLEAN_BULK_COUNT: usize = 4096; let mut clean_rooted = Measure::start("clean_old_root-ms"); let reclaim_vecs = purges_in_root .par_chunks(INDEX_CLEAN_BULK_COUNT) .map(|pubkeys: &[Pubkey]| { let mut reclaims = Vec::new(); let accounts_index = self.accounts_index.read().unwrap(); for pubkey in pubkeys { accounts_index.clean_rooted_entries(&pubkey, &mut reclaims); } reclaims }); let reclaims: Vec<_> = reclaim_vecs.flatten().collect(); clean_rooted.stop(); inc_new_counter_info!("clean-old-root-par-clean-ms", clean_rooted.as_ms() as usize); let mut measure = Measure::start("clean_old_root_reclaims"); self.handle_reclaims_maybe_cleanup(&reclaims); measure.stop(); debug!("{} {}", clean_rooted, measure); inc_new_counter_info!("clean-old-root-reclaim-ms", measure.as_ms() as usize); } fn do_reset_uncleaned_roots(&self, candidates: &mut MutexGuard<Vec<Slot>>) { let previous_roots = self.accounts_index.write().unwrap().reset_uncleaned_roots(); candidates.extend(previous_roots); } #[cfg(test)] fn reset_uncleaned_roots(&self) { self.do_reset_uncleaned_roots(&mut 
self.shrink_candidate_slots.lock().unwrap()); } fn calc_delete_dependencies( purges: &HashMap<Pubkey, (SlotList<AccountInfo>, u64)>, store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>, ) { // Another pass to check if there are some filtered accounts which // do not match the criteria of deleting all appendvecs which contain them // then increment their storage count. let mut already_counted = HashSet::new(); for (_pubkey, (account_infos, ref_count_from_storage)) in purges.iter() { let no_delete = if account_infos.len() as u64 != *ref_count_from_storage { true } else { let mut no_delete = false; for (_slot, account_info) in account_infos { if store_counts.get(&account_info.store_id).unwrap().0 != 0 { no_delete = true; break; } } no_delete }; if no_delete { let mut pending_store_ids: HashSet<usize> = HashSet::new(); for (_slot_id, account_info) in account_infos { if !already_counted.contains(&account_info.store_id) { pending_store_ids.insert(account_info.store_id); } } while !pending_store_ids.is_empty() { let id = pending_store_ids.iter().next().cloned().unwrap(); pending_store_ids.remove(&id); if already_counted.contains(&id) { continue; } store_counts.get_mut(&id).unwrap().0 += 1; already_counted.insert(id); let affected_pubkeys = &store_counts.get(&id).unwrap().1; for key in affected_pubkeys { for (_slot, account_info) in &purges.get(&key).unwrap().0 { if !already_counted.contains(&account_info.store_id) { pending_store_ids.insert(account_info.store_id); } } } } } } } fn purge_keys_exact( &self, pubkey_to_slot_set: Vec<(Pubkey, HashSet<Slot>)>, ) -> (Vec<(u64, AccountInfo)>, Vec<Pubkey>) { let mut reclaims = Vec::new(); let mut dead_keys = Vec::new(); let accounts_index = self.accounts_index.read().unwrap(); for (pubkey, slots_set) in pubkey_to_slot_set { let (new_reclaims, is_empty) = accounts_index.purge_exact(&pubkey, slots_set); if is_empty { dead_keys.push(pubkey); } reclaims.extend(new_reclaims); } (reclaims, dead_keys) } // Purge zero 
lamport accounts and older rooted account states as garbage // collection // Only remove those accounts where the entire rooted history of the account // can be purged because there are no live append vecs in the ancestors pub fn clean_accounts(&self) { // hold a lock to prevent slot shrinking from running because it might modify some rooted // slot storages which can not happen as long as we're cleaning accounts because we're also // modifying the rooted slot storages! let mut candidates = self.shrink_candidate_slots.lock().unwrap(); self.report_store_stats(); let mut accounts_scan = Measure::start("accounts_scan"); let accounts_index = self.accounts_index.read().unwrap(); let pubkeys: Vec<Pubkey> = accounts_index.account_maps.keys().cloned().collect(); // parallel scan the index. let (mut purges, purges_in_root) = pubkeys .par_chunks(4096) .map(|pubkeys: &[Pubkey]| { let mut purges_in_root = Vec::new(); let mut purges = HashMap::new(); for pubkey in pubkeys { if let Some((list, index)) = accounts_index.get(pubkey, None) { let (slot, account_info) = &list[index]; if account_info.lamports == 0 { purges.insert(*pubkey, accounts_index.would_purge(pubkey)); } else if accounts_index.uncleaned_roots.contains(slot) { purges_in_root.push(*pubkey); } } } (purges, purges_in_root) }) .reduce( || (HashMap::new(), Vec::new()), |mut m1, m2| { // Collapse down the hashmaps/vecs into one. 
m1.0.extend(m2.0); m1.1.extend(m2.1); m1 }, ); drop(accounts_index); accounts_scan.stop(); let mut clean_old_rooted = Measure::start("clean_old_roots"); if !purges_in_root.is_empty() { self.clean_old_rooted_accounts(purges_in_root); } self.do_reset_uncleaned_roots(&mut candidates); clean_old_rooted.stop(); let mut store_counts_time = Measure::start("store_counts"); // Calculate store counts as if everything was purged // Then purge if we can let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new(); let storage = self.storage.read().unwrap(); for (key, (account_infos, _ref_count)) in &purges { for (slot, account_info) in account_infos { let slot_storage = storage.0.get(&slot).unwrap(); let store = slot_storage.get(&account_info.store_id).unwrap(); if let Some(store_count) = store_counts.get_mut(&account_info.store_id) { store_count.0 -= 1; store_count.1.insert(*key); } else { let mut key_set = HashSet::new(); key_set.insert(*key); store_counts.insert( account_info.store_id, (store.count_and_status.read().unwrap().0 - 1, key_set), ); } } } store_counts_time.stop(); drop(storage); let mut calc_deps_time = Measure::start("calc_deps"); Self::calc_delete_dependencies(&purges, &mut store_counts); calc_deps_time.stop(); // Only keep purges where the entire history of the account in the root set // can be purged. All AppendVecs for those updates are dead. 
let mut purge_filter = Measure::start("purge_filter"); purges.retain(|_pubkey, (account_infos, _ref_count)| { for (_slot, account_info) in account_infos.iter() { if store_counts.get(&account_info.store_id).unwrap().0 != 0 { return false; } } true }); purge_filter.stop(); let mut reclaims_time = Measure::start("reclaims"); // Recalculate reclaims with new purge set let pubkey_to_slot_set: Vec<_> = purges .into_iter() .map(|(key, (slots_list, _ref_count))| { ( key, HashSet::from_iter(slots_list.into_iter().map(|(slot, _)| slot)), ) }) .collect(); let (reclaims, dead_keys) = self.purge_keys_exact(pubkey_to_slot_set); self.handle_dead_keys(dead_keys); self.handle_reclaims_maybe_cleanup(&reclaims); reclaims_time.stop(); datapoint_info!( "clean_accounts", ("accounts_scan", accounts_scan.as_us() as i64, i64), ("store_counts", store_counts_time.as_us() as i64, i64), ("purge_filter", purge_filter.as_us() as i64, i64), ("calc_deps", calc_deps_time.as_us() as i64, i64), ("reclaims", reclaims_time.as_us() as i64, i64), ); } fn handle_dead_keys(&self, dead_keys: Vec<Pubkey>) { if !dead_keys.is_empty() { let mut accounts_index = self.accounts_index.write().unwrap(); for key in &dead_keys { if let Some((_ref_count, list)) = accounts_index.account_maps.get(key) { if list.read().unwrap().is_empty() { accounts_index.account_maps.remove(key); } } } } } fn handle_reclaims_maybe_cleanup(&self, reclaims: SlotSlice<AccountInfo>) { let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts"); let dead_slots = self.remove_dead_accounts(reclaims); dead_accounts.stop(); let dead_slots_len = { let mut dead_slots_w = self.dead_slots.write().unwrap(); dead_slots_w.extend(dead_slots); dead_slots_w.len() }; if dead_slots_len > 5000 { self.process_dead_slots(None); } } // Atomically process reclaims and new dead_slots in this thread, guaranteeing // complete data removal for slots in reclaims. 
fn handle_reclaims_ensure_cleanup(&self, reclaims: SlotSlice<AccountInfo>) { let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts"); let dead_slots = self.remove_dead_accounts(reclaims); dead_accounts.stop(); let mut dead_slots_w = self.dead_slots.write().unwrap(); dead_slots_w.extend(dead_slots); self.process_dead_slots(Some(dead_slots_w)); } pub fn process_dead_slots<'a>( &'a self, dead_slots_w: Option<RwLockWriteGuard<'a, HashSet<Slot>>>, ) { let empty = HashSet::new(); let mut dead_slots_w = dead_slots_w.unwrap_or_else(|| self.dead_slots.write().unwrap()); let dead_slots = std::mem::replace(&mut *dead_slots_w, empty); drop(dead_slots_w); let mut clean_dead_slots = Measure::start("reclaims::purge_slots"); self.clean_dead_slots(&dead_slots); clean_dead_slots.stop(); let mut purge_slots = Measure::start("reclaims::purge_slots"); self.purge_slots(&dead_slots); purge_slots.stop(); debug!( "process_dead_slots({}): {} {}", dead_slots.len(), clean_dead_slots, purge_slots ); } fn do_shrink_stale_slot(&self, slot: Slot) -> usize { self.do_shrink_slot(slot, false) } fn do_shrink_slot_forced(&self, slot: Slot) { self.do_shrink_slot(slot, true); } fn shrink_stale_slot(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> usize { if let Some(slot) = self.do_next_shrink_slot(candidates) { self.do_shrink_stale_slot(slot) } else { 0 } } // Reads all accounts in given slot's AppendVecs and filter only to alive, // then create a minimum AppendVec filled with the alive. 
fn do_shrink_slot(&self, slot: Slot, forced: bool) -> usize { trace!("shrink_stale_slot: slot: {}", slot); let mut stored_accounts = vec![]; { let storage = self.storage.read().unwrap(); if let Some(stores) = storage.0.get(&slot) { let mut alive_count = 0; let mut stored_count = 0; for store in stores.values() { alive_count += store.count(); stored_count += store.approx_stored_count(); } if alive_count == stored_count && stores.values().len() == 1 { trace!( "shrink_stale_slot: not able to shrink at all{}: {} / {}", alive_count, stored_count, if forced { " (forced)" } else { "" }, ); return 0; } else if (alive_count as f32 / stored_count as f32) >= 0.80 && !forced { trace!( "shrink_stale_slot: not enough space to shrink: {} / {}", alive_count, stored_count, ); return 0; } for store in stores.values() { let mut start = 0; while let Some((account, next)) = store.accounts.get_account(start) { stored_accounts.push(( account.meta.pubkey, account.clone_account(), *account.hash, next - start, (store.id, account.offset), account.meta.write_version, )); start = next; } } } } let alive_accounts: Vec<_> = { let accounts_index = self.accounts_index.read().unwrap(); stored_accounts .iter() .filter( |( pubkey, _account, _account_hash, _storage_size, (store_id, offset), _write_version, )| { if let Some((list, _)) = accounts_index.get(pubkey, None) { list.iter() .any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset) } else { false } }, ) .collect() }; let alive_total: u64 = alive_accounts .iter() .map( |(_pubkey, _account, _account_hash, account_size, _location, _write_verion)| { *account_size as u64 }, ) .sum(); let aligned_total: u64 = (alive_total + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1); debug!( "shrinking: slot: {}, stored_accounts: {} => alive_accounts: {} ({} bytes; aligned to: {})", slot, stored_accounts.len(), alive_accounts.len(), alive_total, aligned_total ); if aligned_total > 0 { let mut accounts = Vec::with_capacity(alive_accounts.len()); let mut hashes = 
Vec::with_capacity(alive_accounts.len()); let mut write_versions = Vec::with_capacity(alive_accounts.len()); for (pubkey, account, account_hash, _size, _location, write_version) in &alive_accounts { accounts.push((pubkey, account)); hashes.push(*account_hash); write_versions.push(*write_version); } let shrunken_store = self.create_and_insert_store(slot, aligned_total); // here, we're writing back alive_accounts. That should be an atomic operation // without use of rather wide locks in this whole function, because we're // mutating rooted slots; There should be no writers to them. let infos = self.store_accounts_to( slot, &accounts, &hashes, |_| shrunken_store.clone(), write_versions.into_iter(), ); let reclaims = self.update_index(slot, infos, &accounts); self.handle_reclaims_maybe_cleanup(&reclaims); let mut storage = self.storage.write().unwrap(); if let Some(slot_storage) = storage.0.get_mut(&slot) { slot_storage.retain(|_key, store| store.count() > 0); } } alive_accounts.len() } // Infinitely returns rooted roots in cyclic order fn do_next_shrink_slot(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> Option<Slot> { // At this point, a lock (= candidates) is ensured to be held to keep // do_reset_uncleaned_roots() (in clean_accounts()) from updating candidates. // Also, candidates in the lock may be swapped here if it's empty. let next = candidates.pop(); if next.is_some() { next } else { let mut new_all_slots = self.all_root_slots_in_index(); let next = new_all_slots.pop(); // refresh candidates for later calls! 
**candidates = new_all_slots; next } } #[cfg(test)] fn next_shrink_slot(&self) -> Option<Slot> { let mut candidates = self.shrink_candidate_slots.lock().unwrap(); self.do_next_shrink_slot(&mut candidates) } fn all_root_slots_in_index(&self) -> Vec<Slot> { let index = self.accounts_index.read().unwrap(); index.roots.iter().cloned().collect() } fn all_slots_in_storage(&self) -> Vec<Slot> { let storage = self.storage.read().unwrap(); storage.0.keys().cloned().collect() } pub fn process_stale_slot(&self) -> usize { let mut measure = Measure::start("stale_slot_shrink-ms"); let candidates = self.shrink_candidate_slots.try_lock(); if candidates.is_err() { // skip and return immediately if locked by clean_accounts() // the calling background thread will just retry later. return 0; } // hold this lock as long as this shrinking process is running to avoid conflicts // with clean_accounts(). let mut candidates = candidates.unwrap(); let count = self.shrink_stale_slot(&mut candidates); measure.stop(); inc_new_counter_info!("stale_slot_shrink-ms", measure.as_ms() as usize); count } #[cfg(test)] fn shrink_all_stale_slots(&self) { for slot in self.all_slots_in_storage() { self.do_shrink_stale_slot(slot); } } pub fn shrink_all_slots(&self) { for slot in self.all_slots_in_storage() { self.do_shrink_slot_forced(slot); } } pub fn scan_accounts<F, A>(&self, ancestors: &Ancestors, scan_func: F) -> A where F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), A: Default, { let mut collector = A::default(); let accounts_index = self.accounts_index.read().unwrap(); let storage = self.storage.read().unwrap(); accounts_index.scan_accounts(ancestors, |pubkey, (account_info, slot)| { scan_func( &mut collector, storage .scan_accounts(account_info, slot) .map(|(account, slot)| (pubkey, account, slot)), ) }); collector } pub fn range_scan_accounts<F, A, R>(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A where F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), A: Default, R: 
RangeBounds<Pubkey>, { let mut collector = A::default(); let accounts_index = self.accounts_index.read().unwrap(); let storage = self.storage.read().unwrap(); accounts_index.range_scan_accounts(ancestors, range, |pubkey, (account_info, slot)| { scan_func( &mut collector, storage .scan_accounts(account_info, slot) .map(|(account, slot)| (pubkey, account, slot)), ) }); collector } /// Scan a specific slot through all the account storage in parallel with sequential read // PERF: Sequentially read each storage entry in parallel pub fn scan_account_storage<F, B>(&self, slot: Slot, scan_func: F) -> Vec<B> where F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync, B: Send + Default, { self.scan_account_storage_inner(slot, scan_func, &self.storage.read().unwrap()) } // The input storage must come from self.storage.read().unwrap() fn scan_account_storage_inner<F, B>( &self, slot: Slot, scan_func: F, storage: &RwLockReadGuard<AccountStorage>, ) -> Vec<B> where F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync, B: Send + Default, { let storage_maps: Vec<Arc<AccountStorageEntry>> = storage .0 .get(&slot) .unwrap_or(&HashMap::new()) .values() .cloned() .collect(); self.thread_pool.install(|| { storage_maps .into_par_iter() .map(|storage| { let accounts = storage.accounts.accounts(0); let mut retval = B::default(); accounts.iter().for_each(|stored_account| { scan_func(stored_account, storage.id, &mut retval) }); retval }) .collect() }) } pub fn set_hash(&self, slot: Slot, parent_slot: Slot) { let mut bank_hashes = self.bank_hashes.write().unwrap(); if bank_hashes.get(&slot).is_some() { error!( "set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?", slot, parent_slot, ); return; } let new_hash_info = BankHashInfo { hash: Hash::default(), snapshot_hash: Hash::default(), stats: BankHashStats::default(), }; bank_hashes.insert(slot, new_hash_info); } pub fn load( storage: &AccountStorage, ancestors: &Ancestors, accounts_index: 
&AccountsIndex<AccountInfo>, pubkey: &Pubkey, ) -> Option<(Account, Slot)> { let (lock, index) = accounts_index.get(pubkey, Some(ancestors))?; let slot = lock[index].0; //TODO: thread this as a ref if let Some(slot_storage) = storage.0.get(&slot) { let info = &lock[index].1; slot_storage .get(&info.store_id) .and_then(|store| Some(store.accounts.get_account(info.offset)?.0.clone_account())) .map(|account| (account, slot)) } else { None } } #[cfg(test)] fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash { let accounts_index = self.accounts_index.read().unwrap(); let (lock, index) = accounts_index.get(pubkey, Some(ancestors)).unwrap(); let slot = lock[index].0; let storage = self.storage.read().unwrap(); let slot_storage = storage.0.get(&slot).unwrap(); let info = &lock[index].1; let entry = slot_storage.get(&info.store_id).unwrap(); let account = entry.accounts.get_account(info.offset); *account.as_ref().unwrap().0.hash } pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> { let accounts_index = self.accounts_index.read().unwrap(); let storage = self.storage.read().unwrap(); Self::load(&storage, ancestors, &accounts_index, pubkey) } fn find_storage_candidate(&self, slot: Slot) -> Arc<AccountStorageEntry> { let mut create_extra = false; let stores = self.storage.read().unwrap(); if let Some(slot_stores) = stores.0.get(&slot) { if !slot_stores.is_empty() { if slot_stores.len() <= self.min_num_stores { let mut total_accounts = 0; for store in slot_stores.values() { total_accounts += store.count_and_status.read().unwrap().0; } // Create more stores so that when scanning the storage all CPUs have work if (total_accounts / 16) >= slot_stores.len() { create_extra = true; } } // pick an available store at random by iterating from a random point let to_skip = thread_rng().gen_range(0, slot_stores.len()); for (i, store) in slot_stores.values().cycle().skip(to_skip).enumerate() { if store.try_available() { let ret 
= store.clone(); drop(stores); if create_extra { self.create_and_insert_store(slot, self.file_size); } return ret; } // looked at every store, bail... if i == slot_stores.len() { break; } } } } drop(stores); let store = self.create_and_insert_store(slot, self.file_size); store.try_available(); store } fn create_and_insert_store(&self, slot: Slot, size: u64) -> Arc<AccountStorageEntry> { let path_index = thread_rng().gen_range(0, self.paths.len()); let store = Arc::new(self.new_storage_entry(slot, &Path::new(&self.paths[path_index]), size)); let store_for_index = store.clone(); let mut stores = self.storage.write().unwrap(); let slot_storage = stores.0.entry(slot).or_insert_with(HashMap::new); slot_storage.insert(store.id, store_for_index); store } pub fn purge_slot(&self, slot: Slot) { let mut slots = HashSet::new(); slots.insert(slot); self.purge_slots(&slots); } pub fn purge_slots(&self, slots: &HashSet<Slot>) { //add_root should be called first let accounts_index = self.accounts_index.read().unwrap(); let non_roots: Vec<_> = slots .iter() .filter(|slot| !accounts_index.is_root(**slot)) .collect(); drop(accounts_index); let mut storage = self.storage.write().unwrap(); for slot in non_roots { storage.0.remove(&slot); } } pub fn remove_unrooted_slot(&self, remove_slot: Slot) { if self.accounts_index.read().unwrap().is_root(remove_slot) { panic!("Trying to remove accounts for rooted slot {}", remove_slot); } let pubkey_sets: Vec<HashSet<Pubkey>> = self.scan_account_storage( remove_slot, |stored_account: &StoredAccount, _, accum: &mut HashSet<Pubkey>| { accum.insert(stored_account.meta.pubkey); }, ); // Purge this slot from the accounts index let mut reclaims = vec![]; { let pubkeys = pubkey_sets.iter().flatten(); let accounts_index = self.accounts_index.read().unwrap(); for pubkey in pubkeys { accounts_index.clean_unrooted_entries_by_slot(remove_slot, pubkey, &mut reclaims); } } // 1) Remove old bank hash from self.bank_hashes // 2) Purge this slot's storage entries 
from self.storage self.handle_reclaims_ensure_cleanup(&reclaims); assert!(self.storage.read().unwrap().0.get(&remove_slot).is_none()); } fn include_owner(cluster_type: &ClusterType, slot: Slot) -> bool { // When devnet was moved to stable release channel, it was done without // hashing account.owner. That's because devnet's slot was lower than // 5_800_000 and the release channel's gating lacked ClusterType at the time... match cluster_type { ClusterType::Devnet => slot >= 5_800_000, _ => true, } } pub fn hash_stored_account( slot: Slot, account: &StoredAccount, cluster_type: &ClusterType, ) -> Hash { let include_owner = Self::include_owner(cluster_type, slot); if slot > Self::get_blake3_slot(cluster_type) { Self::blake3_hash_account_data( slot, account.account_meta.lamports, &account.account_meta.owner, account.account_meta.executable, account.account_meta.rent_epoch, account.data, &account.meta.pubkey, include_owner, ) } else { Self::hash_account_data( slot, account.account_meta.lamports, &account.account_meta.owner, account.account_meta.executable, account.account_meta.rent_epoch, account.data, &account.meta.pubkey, include_owner, ) } } pub fn hash_account( slot: Slot, account: &Account, pubkey: &Pubkey, cluster_type: &ClusterType, ) -> Hash { let include_owner = Self::include_owner(cluster_type, slot); if slot > Self::get_blake3_slot(cluster_type) { Self::blake3_hash_account_data( slot, account.lamports, &account.owner, account.executable, account.rent_epoch, &account.data, pubkey, include_owner, ) } else { Self::hash_account_data( slot, account.lamports, &account.owner, account.executable, account.rent_epoch, &account.data, pubkey, include_owner, ) } } fn hash_frozen_account_data(account: &Account) -> Hash { let mut hasher = Hasher::default(); hasher.hash(&account.data); hasher.hash(&account.owner.as_ref()); if account.executable { hasher.hash(&[1u8; 1]); } else { hasher.hash(&[0u8; 1]); } hasher.result() } pub fn hash_account_data( slot: Slot, lamports: u64, 
owner: &Pubkey, executable: bool, rent_epoch: Epoch, data: &[u8], pubkey: &Pubkey, include_owner: bool, ) -> Hash { if lamports == 0 { return Hash::default(); } let mut hasher = Hasher::default(); hasher.hash(&lamports.to_le_bytes()); hasher.hash(&slot.to_le_bytes()); hasher.hash(&rent_epoch.to_le_bytes()); hasher.hash(&data); if executable { hasher.hash(&[1u8; 1]); } else { hasher.hash(&[0u8; 1]); } if include_owner { hasher.hash(&owner.as_ref()); } hasher.hash(&pubkey.as_ref()); hasher.result() } pub fn blake3_hash_account_data( slot: Slot, lamports: u64, owner: &Pubkey, executable: bool, rent_epoch: Epoch, data: &[u8], pubkey: &Pubkey, include_owner: bool, ) -> Hash { if lamports == 0 { return Hash::default(); } let mut hasher = blake3::Hasher::new(); hasher.update(&lamports.to_le_bytes()); hasher.update(&slot.to_le_bytes()); hasher.update(&rent_epoch.to_le_bytes()); hasher.update(&data); if executable { hasher.update(&[1u8; 1]); } else { hasher.update(&[0u8; 1]); } if include_owner { hasher.update(&owner.as_ref()); } hasher.update(&pubkey.as_ref()); Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap()) } fn get_blake3_slot(cluster_type: &ClusterType) -> Slot { match cluster_type { ClusterType::Development => 0, // Epoch 400 ClusterType::Devnet => 3_276_800, // Epoch 78 ClusterType::MainnetBeta => 33_696_000, // Epoch 95 ClusterType::Testnet => 35_516_256, } } fn bulk_assign_write_version(&self, count: usize) -> u64 { self.write_version .fetch_add(count as u64, Ordering::Relaxed) } fn store_accounts( &self, slot: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash], ) -> Vec<AccountInfo> { let mut current_version = self.bulk_assign_write_version(accounts.len()); let write_version_producer = std::iter::from_fn(move || { let ret = current_version; current_version += 1; Some(ret) }); let storage_finder = |slot| self.find_storage_candidate(slot); self.store_accounts_to( slot, accounts, hashes, storage_finder, 
write_version_producer, ) } fn store_accounts_to<F: FnMut(Slot) -> Arc<AccountStorageEntry>, P: Iterator<Item = u64>>( &self, slot: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash], mut storage_finder: F, mut write_version_producer: P, ) -> Vec<AccountInfo> { let default_account = Account::default(); let with_meta: Vec<(StoredMeta, &Account)> = accounts .iter() .map(|(pubkey, account)| { let account = if account.lamports == 0 { &default_account } else { *account }; let data_len = account.data.len() as u64; let meta = StoredMeta { write_version: write_version_producer.next().unwrap(), pubkey: **pubkey, data_len, }; (meta, account) }) .collect(); let mut infos: Vec<AccountInfo> = Vec::with_capacity(with_meta.len()); while infos.len() < with_meta.len() { let storage = storage_finder(slot); let rvs = storage .accounts .append_accounts(&with_meta[infos.len()..], &hashes[infos.len()..]); if rvs.is_empty() { storage.set_status(AccountStorageStatus::Full); // See if an account overflows the default append vec size. 
                // First account that failed to fit, padded with headroom for
                // append-vec metadata; if it exceeds the configured file size,
                // create an oversized store (2x) so it can ever fit.
                let data_len = (with_meta[infos.len()].1.data.len() + 4096) as u64;
                if data_len > self.file_size {
                    self.create_and_insert_store(slot, data_len * 2);
                }
                continue;
            }
            // `rvs` holds the offsets of the accounts just appended, aligned with
            // the corresponding suffix of `with_meta`.
            for (offset, (_, account)) in rvs.iter().zip(&with_meta[infos.len()..]) {
                storage.add_account();
                infos.push(AccountInfo {
                    store_id: storage.id,
                    offset: *offset,
                    lamports: account.lamports,
                });
            }
            // restore the state to available
            storage.set_status(AccountStorageStatus::Available);
        }
        infos
    }

    /// Logs and emits datapoint metrics summarizing per-slot store counts and the
    /// accumulated delta-hash timing counters (which are reset to 0 on read).
    fn report_store_stats(&self) {
        let mut total_count = 0;
        let mut min = std::usize::MAX;
        let mut min_slot = 0;
        let mut max = 0;
        let mut max_slot = 0;
        let mut newest_slot = 0;
        let mut oldest_slot = std::u64::MAX;
        let stores = self.storage.read().unwrap();
        for (slot, slot_stores) in &stores.0 {
            total_count += slot_stores.len();
            if slot_stores.len() < min {
                min = slot_stores.len();
                min_slot = *slot;
            }
            if slot_stores.len() > max {
                max = slot_stores.len();
                max_slot = *slot;
            }
            if *slot > newest_slot {
                newest_slot = *slot;
            }
            if *slot < oldest_slot {
                oldest_slot = *slot;
            }
        }
        // Release the storage read lock before doing any logging I/O.
        drop(stores);
        info!("total_stores: {}, newest_slot: {}, oldest_slot: {}, max_slot: {} (num={}), min_slot: {} (num={})",
              total_count, newest_slot, oldest_slot, max_slot, max, min_slot, min);
        datapoint_info!("accounts_db-stores", ("total_count", total_count, i64));
        datapoint_info!(
            "accounts_db-perf-stats",
            (
                "delta_hash_num",
                self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
                i64
            ),
            (
                "delta_hash_scan_us",
                self.stats
                    .delta_hash_scan_time_total_us
                    .swap(0, Ordering::Relaxed),
                i64
            ),
            (
                "delta_hash_merge_us",
                self.stats
                    .delta_hash_merge_time_total_us
                    .swap(0, Ordering::Relaxed),
                i64
            ),
            (
                "delta_hash_accumulate_us",
                self.stats
                    .delta_hash_accumulate_time_total_us
                    .swap(0, Ordering::Relaxed),
                i64
            ),
        );
    }

    /// Computes a merkle root over the given account hashes with the given fanout.
    /// Lamports are ignored here; only the hashes participate.
    pub fn compute_merkle_root(hashes: Vec<(Pubkey, Hash, u64)>, fanout: usize) -> Hash {
        let hashes: Vec<_> = hashes
            .into_iter()
            .map(|(_pubkey, hash, _lamports)| hash)
            .collect();
        // Group leaves into fanout-sized chunks; each level below hashes one
        // chunk into a single parent node.
        let mut hashes: Vec<_> = hashes.chunks(fanout).map(|x| x.to_vec()).collect();
        // Collapse one tree level per iteration (in parallel) until a single
        // chunk remains, then hash that chunk into the root.
        while hashes.len() > 1 {
            let mut time = Measure::start("time");
            let new_hashes: Vec<Hash> = hashes
                .par_iter()
                .map(|h| {
                    let mut hasher = Hasher::default();
                    for v in h.iter() {
                        hasher.hash(v.as_ref());
                    }
                    hasher.result()
                })
                .collect();
            time.stop();
            debug!("hashing {} {}", hashes.len(), time);
            hashes = new_hashes.chunks(fanout).map(|x| x.to_vec()).collect();
        }
        let mut hasher = Hasher::default();
        hashes.into_iter().flatten().for_each(|hash| {
            hasher.hash(hash.as_ref());
        });
        hasher.result()
    }

    /// Sorts the (pubkey, hash, lamports) triples and merkle-roots them,
    /// discarding the capitalization.
    fn accumulate_account_hashes(hashes: Vec<(Pubkey, Hash, u64)>) -> Hash {
        let (hash, ..) = Self::do_accumulate_account_hashes_and_capitalization(hashes, false);
        hash
    }

    /// Like `accumulate_account_hashes`, but also returns the summed lamports
    /// (capitalization). Panics on u64 overflow via `checked_sum_for_capitalization`.
    fn accumulate_account_hashes_and_capitalization(
        hashes: Vec<(Pubkey, Hash, u64)>,
    ) -> (Hash, u64) {
        let (hash, cap) = Self::do_accumulate_account_hashes_and_capitalization(hashes, true);
        (hash, cap.unwrap())
    }

    /// Shared implementation: parallel-sorts by pubkey, optionally sums lamports,
    /// then computes the merkle root with a fixed fanout of 16.
    fn do_accumulate_account_hashes_and_capitalization(
        mut hashes: Vec<(Pubkey, Hash, u64)>,
        calculate_cap: bool,
    ) -> (Hash, Option<u64>) {
        let mut sort_time = Measure::start("sort");
        // Sort by pubkey so the merkle root is independent of input order.
        hashes.par_sort_by(|a, b| a.0.cmp(&b.0));
        sort_time.stop();

        let mut sum_time = Measure::start("cap");
        let cap = if calculate_cap {
            Some(Self::checked_sum_for_capitalization(
                hashes.iter().map(|(_, _, lamports)| *lamports),
            ))
        } else {
            None
        };
        sum_time.stop();

        let mut hash_time = Measure::start("hash");
        let fanout = 16;
        let res = Self::compute_merkle_root(hashes, fanout);
        hash_time.stop();
        debug!("{} {} {}", sort_time, hash_time, sum_time);

        (res, cap)
    }

    /// Sums balances in u128 to avoid intermediate overflow; panics if the total
    /// itself exceeds u64::MAX.
    pub fn checked_sum_for_capitalization<T: Iterator<Item = u64>>(balances: T) -> u64 {
        balances
            .map(|b| b as u128)
            .sum::<u128>()
            .try_into()
            .expect("overflow is detected while summing capitalization")
    }

    /// Lamports this account contributes to cluster capitalization. Native-loader
    /// executables and sysvars retain 1 lamport that is excluded from cap.
    pub fn account_balance_for_capitalization(
        lamports: u64,
        owner: &Pubkey,
        executable: bool,
    ) -> u64 {
        let is_specially_retained = (solana_sdk::native_loader::check_id(owner) && executable)
            || solana_sdk::sysvar::check_id(owner);

        if is_specially_retained {
            // specially retained accounts always have an initial 1 lamport
            // balance, but could be modified by transfers which increase
            // the balance but don't affect the capitalization.
            lamports - 1
        } else {
            lamports
        }
    }

    /// Scans every live account visible through `ancestors`, optionally
    /// re-verifying each stored hash, and returns (merkle root, capitalization).
    ///
    /// Errors with `MismatchedAccountHash` if `check_hash` finds any divergence.
    fn calculate_accounts_hash(
        &self,
        ancestors: &Ancestors,
        check_hash: bool,
    ) -> Result<(Hash, u64), BankHashVerificationError> {
        use BankHashVerificationError::*;
        let mut scan = Measure::start("scan");
        let accounts_index = self.accounts_index.read().unwrap();
        let storage = self.storage.read().unwrap();
        let keys: Vec<_> = accounts_index.account_maps.keys().collect();
        let mismatch_found = AtomicU64::new(0);
        // For each pubkey, resolve its newest visible (slot, info) entry and pull
        // the stored hash + capitalization-adjusted balance from its store.
        let hashes: Vec<_> = keys
            .par_iter()
            .filter_map(|pubkey| {
                if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors)) {
                    let (slot, account_info) = &list[index];
                    // Zero-lamport entries are dead and excluded from the hash.
                    if account_info.lamports != 0 {
                        storage
                            .0
                            .get(&slot)
                            .and_then(|storage_map| storage_map.get(&account_info.store_id))
                            .and_then(|store| {
                                let account = store.accounts.get_account(account_info.offset)?.0;
                                let balance = Self::account_balance_for_capitalization(
                                    account_info.lamports,
                                    &account.account_meta.owner,
                                    account.account_meta.executable,
                                );

                                if check_hash {
                                    let hash = Self::hash_stored_account(
                                        *slot,
                                        &account,
                                        &self
                                            .cluster_type
                                            .expect("Cluster type must be set at initialization"),
                                    );
                                    if hash != *account.hash {
                                        // Count mismatches; error is raised after the scan.
                                        mismatch_found.fetch_add(1, Ordering::Relaxed);
                                        return None;
                                    }
                                }

                                Some((**pubkey, *account.hash, balance))
                            })
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect();
        if mismatch_found.load(Ordering::Relaxed) > 0 {
            warn!(
                "{} mismatched account hash(es) found",
                mismatch_found.load(Ordering::Relaxed)
            );
            return Err(MismatchedAccountHash);
        }

        scan.stop();
        let hash_total = hashes.len();
        let mut accumulate = Measure::start("accumulate");
        let (accumulated_hash, total_lamports) =
            Self::accumulate_account_hashes_and_capitalization(hashes);
        accumulate.stop();
        datapoint_info!(
            "update_accounts_hash",
            ("accounts_scan", scan.as_us(), i64),
            ("hash_accumulate", accumulate.as_us(), i64),
            ("hash_total",
                hash_total, i64),
        );
        Ok((accumulated_hash, total_lamports))
    }

    /// Returns the previously computed snapshot hash for `slot`.
    /// Panics if no bank hash info has been recorded for that slot.
    pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
        let bank_hashes = self.bank_hashes.read().unwrap();
        let bank_hash_info = bank_hashes.get(&slot).unwrap();
        bank_hash_info.snapshot_hash
    }

    /// Recomputes the accounts hash (without per-account hash verification),
    /// records it as `slot`'s snapshot hash, and returns (hash, capitalization).
    pub fn update_accounts_hash(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) {
        let (hash, total_lamports) = self.calculate_accounts_hash(ancestors, false).unwrap();
        let mut bank_hashes = self.bank_hashes.write().unwrap();
        let mut bank_hash_info = bank_hashes.get_mut(&slot).unwrap();
        bank_hash_info.snapshot_hash = hash;
        (hash, total_lamports)
    }

    /// Fully re-verifies the bank: recomputes the accounts hash with per-account
    /// hash checking, compares capitalization against `total_lamports`, and
    /// compares the result against the recorded snapshot hash for `slot`.
    pub fn verify_bank_hash_and_lamports(
        &self,
        slot: Slot,
        ancestors: &Ancestors,
        total_lamports: u64,
    ) -> Result<(), BankHashVerificationError> {
        use BankHashVerificationError::*;

        let (calculated_hash, calculated_lamports) =
            self.calculate_accounts_hash(ancestors, true)?;

        if calculated_lamports != total_lamports {
            warn!(
                "Mismatched total lamports: {} calculated: {}",
                total_lamports, calculated_lamports
            );
            return Err(MismatchedTotalLamports(calculated_lamports, total_lamports));
        }

        let bank_hashes = self.bank_hashes.read().unwrap();
        if let Some(found_hash_info) = bank_hashes.get(&slot) {
            if calculated_hash == found_hash_info.snapshot_hash {
                Ok(())
            } else {
                warn!(
                    "mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
                    slot, calculated_hash, found_hash_info.snapshot_hash
                );
                Err(MismatchedBankHash)
            }
        } else {
            Err(MissingBankHash)
        }
    }

    /// Hashes only the accounts written in `slot` (the "delta"): scans the slot's
    /// stores, dedupes per pubkey by highest write version, and accumulates.
    pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
        let mut scan = Measure::start("scan");
        // One map per storage entry: pubkey -> (write_version, hash).
        let mut accumulator: Vec<HashMap<Pubkey, (u64, Hash)>> = self.scan_account_storage(
            slot,
            |stored_account: &StoredAccount,
             _store_id: AppendVecId,
             accum: &mut HashMap<Pubkey, (u64, Hash)>| {
                accum.insert(
                    stored_account.meta.pubkey,
                    (stored_account.meta.write_version, *stored_account.hash),
                );
            },
        );
        scan.stop();
        let mut merge = Measure::start("merge");
        // Fold the per-store maps together, keeping the newest version per key.
        let mut account_maps = accumulator.pop().unwrap();
        while let Some(maps) =
            accumulator.pop() {
            AccountsDB::merge(&mut account_maps, &maps);
        }
        merge.stop();
        let mut accumulate = Measure::start("accumulate");
        // Lamports are irrelevant for the delta hash, so they are zeroed.
        let hashes: Vec<_> = account_maps
            .into_iter()
            .map(|(pubkey, (_, hash))| (pubkey, hash, 0))
            .collect();
        let ret = Self::accumulate_account_hashes(hashes);
        accumulate.stop();
        self.stats
            .delta_hash_scan_time_total_us
            .fetch_add(scan.as_us(), Ordering::Relaxed);
        self.stats
            .delta_hash_merge_time_total_us
            .fetch_add(merge.as_us(), Ordering::Relaxed);
        self.stats
            .delta_hash_accumulate_time_total_us
            .fetch_add(accumulate.as_us(), Ordering::Relaxed);
        self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
        ret
    }

    /// Records the freshly stored accounts in the accounts index and returns the
    /// list of older (slot, info) entries they superseded, for later reclamation.
    ///
    /// Updates that hit existing index entries are done under the read lock;
    /// brand-new pubkeys are inserted afterwards under the write lock.
    fn update_index(
        &self,
        slot: Slot,
        infos: Vec<AccountInfo>,
        accounts: &[(&Pubkey, &Account)],
    ) -> SlotList<AccountInfo> {
        let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
        let index = self.accounts_index.read().unwrap();
        let mut update_index_work = Measure::start("update_index_work");
        // `update` returns the info back for pubkeys not yet in the index.
        let inserts: Vec<_> = infos
            .into_iter()
            .zip(accounts.iter())
            .filter_map(|(info, pubkey_account)| {
                let pubkey = pubkey_account.0;
                index
                    .update(slot, pubkey, info, &mut reclaims)
                    .map(|info| (pubkey, info))
            })
            .collect();
        drop(index);
        if !inserts.is_empty() {
            let mut index = self.accounts_index.write().unwrap();
            for (pubkey, info) in inserts {
                index.insert(slot, pubkey, info, &mut reclaims);
            }
        }
        update_index_work.stop();
        reclaims
    }

    /// Decrements store account counts for each reclaimed entry and returns the
    /// set of slots whose stores all became empty (candidates for cleanup).
    fn remove_dead_accounts(&self, reclaims: SlotSlice<AccountInfo>) -> HashSet<Slot> {
        let storage = self.storage.read().unwrap();
        let mut dead_slots = HashSet::new();
        for (slot, account_info) in reclaims {
            if let Some(slot_storage) = storage.0.get(slot) {
                if let Some(store) = slot_storage.get(&account_info.store_id) {
                    assert_eq!(
                        *slot, store.slot,
                        "AccountDB::accounts_index corrupted. Storage should only point to one slot"
                    );
                    let count = store.remove_account();
                    if count == 0 {
                        dead_slots.insert(*slot);
                    }
                }
            }
        }
        // A slot is only truly dead if every one of its stores is empty.
        dead_slots.retain(|slot| {
            if let Some(slot_storage) = storage.0.get(&slot) {
                for x in slot_storage.values() {
                    if x.count() != 0 {
                        return false;
                    }
                }
            }
            true
        });

        dead_slots
    }

    /// Purges fully-dead slots: unrefs their pubkeys from the index (in parallel),
    /// drops their index roots, and removes their bank hash entries.
    pub fn clean_dead_slots(&self, dead_slots: &HashSet<Slot>) {
        if !dead_slots.is_empty() {
            {
                let mut measure = Measure::start("clean_dead_slots-ms");
                let storage = self.storage.read().unwrap();
                // Clone the store handles so the storage lock can be released
                // before the (expensive) account enumeration below.
                let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
                for slot in dead_slots.iter() {
                    if let Some(slot_storage) = storage.0.get(slot) {
                        for store in slot_storage.values() {
                            stores.push(store.clone());
                        }
                    }
                }
                drop(storage);
                datapoint_debug!("clean_dead_slots", ("stores", stores.len(), i64));
                let pubkeys: Vec<Vec<Pubkey>> = {
                    self.thread_pool_clean.install(|| {
                        stores
                            .into_par_iter()
                            .map(|store| {
                                let accounts = store.accounts.accounts(0);
                                accounts
                                    .into_iter()
                                    .map(|account| account.meta.pubkey)
                                    .collect::<Vec<Pubkey>>()
                            })
                            .collect()
                    })
                };
                let index = self.accounts_index.read().unwrap();
                for pubkey_v in pubkeys {
                    for pubkey in pubkey_v {
                        index.unref_from_storage(&pubkey);
                    }
                }
                drop(index);
                measure.stop();
                inc_new_counter_info!("clean_dead_slots-unref-ms", measure.as_ms() as usize);

                let mut index = self.accounts_index.write().unwrap();
                for slot in dead_slots.iter() {
                    index.clean_dead_slot(*slot);
                }
            }
            {
                let mut bank_hashes = self.bank_hashes.write().unwrap();
                for slot in dead_slots.iter() {
                    bank_hashes.remove(slot);
                }
            }
        }
    }

    /// Hashes each account about to be stored in `slot` and merges the accounts'
    /// stats into the slot's `BankHashInfo`. Returns hashes in input order.
    fn hash_accounts(
        &self,
        slot: Slot,
        accounts: &[(&Pubkey, &Account)],
        cluster_type: &ClusterType,
    ) -> Vec<Hash> {
        let mut stats = BankHashStats::default();
        let hashes: Vec<_> = accounts
            .iter()
            .map(|(pubkey, account)| {
                stats.update(account);
                Self::hash_account(slot, account, pubkey, cluster_type)
            })
            .collect();

        let mut bank_hashes = self.bank_hashes.write().unwrap();
        let slot_info = bank_hashes
            .entry(slot)
            .or_insert_with(BankHashInfo::default);
        slot_info.stats.merge(&stats);

        hashes
    }

    /// Marks each listed account as frozen at its current lamports/hash; any
    /// later store that lowers the balance or alters the data will panic.
    /// Panics immediately if a pubkey cannot be loaded via `ancestors`.
    pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) {
        for account_pubkey in account_pubkeys {
            if let Some((account, _slot)) = self.load_slow(ancestors, &account_pubkey) {
                let frozen_account_info = FrozenAccountInfo {
                    hash: Self::hash_frozen_account_data(&account),
                    lamports: account.lamports,
                };
                warn!(
                    "Account {} is now frozen at lamports={}, hash={}",
                    account_pubkey, frozen_account_info.lamports, frozen_account_info.hash
                );
                self.frozen_accounts
                    .insert(*account_pubkey, frozen_account_info);
            } else {
                panic!(
                    "Unable to freeze an account that does not exist: {}",
                    account_pubkey
                );
            }
        }
    }

    /// Cause a panic if frozen accounts would be affected by data in `accounts`
    fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &Account)]) {
        // Fast path: nothing frozen, nothing to check.
        if self.frozen_accounts.is_empty() {
            return;
        }
        for (account_pubkey, account) in accounts.iter() {
            if let Some(frozen_account_info) = self.frozen_accounts.get(*account_pubkey) {
                if account.lamports < frozen_account_info.lamports {
                    // Flag is set so tests/monitors can observe the violation
                    // even though the panic may be caught.
                    FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
                    panic!(
                        "Frozen account {} modified.  Lamports decreased from {} to {}",
                        account_pubkey, frozen_account_info.lamports, account.lamports,
                    )
                }

                let hash = Self::hash_frozen_account_data(&account);
                if hash != frozen_account_info.hash {
                    FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
                    panic!(
                        "Frozen account {} modified.  Hash changed from {} to {}",
                        account_pubkey, frozen_account_info.hash, hash,
                    )
                }
            }
        }
    }

    /// Store the account update.
    pub fn store(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) {
        // Frozen-account violations panic before anything is written.
        self.assert_frozen_accounts(accounts);
        let hashes = self.hash_accounts(
            slot,
            accounts,
            &self
                .cluster_type
                .expect("Cluster type must be set at initialization"),
        );
        self.store_with_hashes(slot, accounts, &hashes);
    }

    /// Writes pre-hashed accounts to storage, updates the index, and reclaims
    /// superseded entries.
    fn store_with_hashes(&self, slot: Slot, accounts: &[(&Pubkey, &Account)], hashes: &[Hash]) {
        let mut store_accounts = Measure::start("store::store_accounts");
        let infos = self.store_accounts(slot, accounts, hashes);
        store_accounts.stop();

        let mut update_index = Measure::start("store::update_index");
        let reclaims = self.update_index(slot, infos, accounts);
        update_index.stop();
        trace!("reclaim: {}", reclaims.len());

        self.handle_reclaims_maybe_cleanup(&reclaims);
    }

    /// Marks `slot` as rooted in the accounts index.
    pub fn add_root(&self, slot: Slot) {
        self.accounts_index.write().unwrap().add_root(slot)
    }

    /// Collects the non-empty storage entries of every rooted slot at or before
    /// `snapshot_slot`, grouped per slot, for snapshot serialization.
    pub fn get_snapshot_storages(&self, snapshot_slot: Slot) -> SnapshotStorages {
        let accounts_index = self.accounts_index.read().unwrap();
        let r_storage = self.storage.read().unwrap();
        r_storage
            .0
            .iter()
            .filter(|(slot, _slot_stores)| {
                **slot <= snapshot_slot && accounts_index.is_root(**slot)
            })
            .map(|(_slot, slot_stores)| {
                slot_stores
                    .values()
                    .filter(|x| x.has_accounts())
                    .cloned()
                    .collect()
            })
            .filter(|snapshot_storage: &SnapshotStorage| !snapshot_storage.is_empty())
            .collect()
    }

    /// Merges `source` into `dest`, keeping whichever value has the higher
    /// version for each key (ties go to `source`).
    fn merge<X>(dest: &mut HashMap<Pubkey, X>, source: &HashMap<Pubkey, X>)
    where
        X: Versioned + Clone,
    {
        for (key, source_item) in source.iter() {
            if let Some(dest_item) = dest.get(key) {
                if dest_item.version() > source_item.version() {
                    continue;
                }
            }
            dest.insert(*key, source_item.clone());
        }
    }

    /// Rebuilds the accounts index from on-disk storage (e.g. after loading a
    /// snapshot): replays every slot's stored accounts in write-version order,
    /// re-roots all slots, and restores per-store account counts.
    pub fn generate_index(&self) {
        let mut accounts_index = self.accounts_index.write().unwrap();
        let storage = self.storage.read().unwrap();
        // Process slots in ascending order so index insertion order matches
        // the original write order.
        let mut slots: Vec<Slot> = storage.0.keys().cloned().collect();
        slots.sort();

        let mut last_log_update = Instant::now();
        for (index, slot) in slots.iter().enumerate() {
            let now = Instant::now();
            if now.duration_since(last_log_update).as_secs() >= 10 {
                info!("generating index: {}/{} slots...", index, slots.len());
                last_log_update = now;
            }

            // One map per storage entry: pubkey -> [(write_version, info)].
            let accumulator: Vec<HashMap<Pubkey, Vec<(u64, AccountInfo)>>> = self
                .scan_account_storage_inner(
                    *slot,
                    |stored_account: &StoredAccount,
                     store_id: AppendVecId,
                     accum: &mut HashMap<Pubkey, Vec<(u64, AccountInfo)>>| {
                        let account_info = AccountInfo {
                            store_id,
                            offset: stored_account.offset,
                            lamports: stored_account.account_meta.lamports,
                        };
                        let entry = accum
                            .entry(stored_account.meta.pubkey)
                            .or_insert_with(Vec::new);
                        entry.push((stored_account.meta.write_version, account_info));
                    },
                    &storage,
                );

            let mut accounts_map: HashMap<Pubkey, Vec<(u64, AccountInfo)>> = HashMap::new();
            for accumulator_entry in accumulator.iter() {
                for (pubkey, storage_entry) in accumulator_entry {
                    let entry = accounts_map.entry(*pubkey).or_insert_with(Vec::new);
                    entry.extend(storage_entry.iter().cloned());
                }
            }

            // Need to restore indexes even with older write versions which may
            // be shielding other accounts. When they are then purged, the
            // original non-shielded account value will be visible when the account
            // is restored from the append-vec
            if !accumulator.is_empty() {
                let mut _reclaims: Vec<(u64, AccountInfo)> = vec![];
                for (pubkey, account_infos) in accounts_map.iter_mut() {
                    // Oldest write version first, so the newest ends up visible.
                    account_infos.sort_by(|a, b| a.0.cmp(&b.0));
                    for (_, account_info) in account_infos {
                        accounts_index.insert(*slot, pubkey, account_info.clone(), &mut _reclaims);
                    }
                }
            }
        }
        // Need to add these last, otherwise older updates will be cleaned
        for slot in slots {
            accounts_index.add_root(slot);
        }

        // Recount how many live index entries reference each store and restore
        // the stores' counts (and approximate stored counts) accordingly.
        let mut counts = HashMap::new();
        for slot_list in accounts_index.account_maps.values() {
            for (_slot, account_entry) in slot_list.1.read().unwrap().iter() {
                *counts.entry(account_entry.store_id).or_insert(0) += 1;
            }
        }
        for slot_stores in storage.0.values() {
            for (id, store) in slot_stores {
                if let Some(count) = counts.get(&id) {
                    trace!(
                        "id: {} setting count: {} cur: {}",
                        id,
                        count,
                        store.count_and_status.read().unwrap().0
                    );
                    store.count_and_status.write().unwrap().0 = *count;
                } else {
                    trace!("id: {} clearing count", id);
                    store.count_and_status.write().unwrap().0 = 0;
                }
                store
                    .approx_store_count
                    .store(store.accounts.accounts(0).len(), Ordering::Relaxed);
            }
        }
    }

    /// Debug helper: dumps both the index and the store status under `label`.
    pub(crate) fn print_accounts_stats(&self, label: &'static str) {
        self.print_index(label);
        self.print_count_and_status(label);
    }

    /// Debug helper: logs the rooted slots and every pubkey's slot list.
    fn print_index(&self, label: &'static str) {
        let mut roots: Vec<_> = self
            .accounts_index
            .read()
            .unwrap()
            .roots
            .iter()
            .cloned()
            .collect();
        roots.sort();
        info!("{}: accounts_index roots: {:?}", label, roots,);
        for (pubkey, list) in &self.accounts_index.read().unwrap().account_maps {
            info!("  key: {}", pubkey);
            info!("      slots: {:?}", *list.1.read().unwrap());
        }
    }

    /// Debug helper: logs each store's count/status/size per slot.
    fn print_count_and_status(&self, label: &'static str) {
        let storage = self.storage.read().unwrap();
        let mut slots: Vec<_> = storage.0.keys().cloned().collect();
        slots.sort();
        info!("{}: count_and status for {} slots:", label, slots.len());
        for slot in &slots {
            let
                slot_stores = storage.0.get(slot).unwrap();
            let mut ids: Vec<_> = slot_stores.keys().cloned().collect();
            ids.sort();
            for id in &ids {
                let entry = slot_stores.get(id).unwrap();
                info!(
                    "  slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
                    slot,
                    id,
                    *entry.count_and_status.read().unwrap(),
                    entry.approx_store_count.load(Ordering::Relaxed),
                    entry.accounts.len(),
                    entry.accounts.capacity(),
                );
            }
        }
    }
}

#[cfg(test)]
pub mod tests {
    // TODO: all the bank tests are bank specific, issue: 2194
    use super::*;
    use crate::{accounts_index::RefCount, append_vec::AccountMeta};
    use assert_matches::assert_matches;
    use rand::{thread_rng, Rng};
    use solana_sdk::{account::Account, hash::HASH_BYTES};
    use std::{fs, str::FromStr};

    // Builds an ancestor map 0..end_slot where each slot's parent is slot-1.
    fn linear_ancestors(end_slot: u64) -> Ancestors {
        let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
        for i in 1..end_slot {
            ancestors.insert(i, (i - 1) as usize);
        }
        ancestors
    }

    // Rooted slot 0 must remain visible even when ancestors only name slot 1.
    #[test]
    fn test_accountsdb_add_root() {
        solana_logger::setup();
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let key = Pubkey::default();
        let account0 = Account::new(1, 0, &key);

        db.store(0, &[(&key, &account0)]);
        db.add_root(0);
        let ancestors = vec![(1, 1)].into_iter().collect();
        assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0)));
    }

    // Loading with ancestors should always surface the newest visible version.
    #[test]
    fn test_accountsdb_latest_ancestor() {
        solana_logger::setup();
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let key = Pubkey::default();
        let account0 = Account::new(1, 0, &key);

        db.store(0, &[(&key, &account0)]);

        let account1 = Account::new(0, 0, &key);
        db.store(1, &[(&key, &account1)]);

        let ancestors = vec![(1, 1)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);

        let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);

        let accounts: Vec<Account> =
            db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| {
                if let Some(data) = option {
                    accounts.push(data.1);
                }
            });
        assert_eq!(accounts, vec![account1]);
    }

    // Same as latest-ancestor, but rooting slot 0 must not shadow slot 1's value.
    #[test]
    fn test_accountsdb_latest_ancestor_with_root() {
        solana_logger::setup();
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let key = Pubkey::default();
        let account0 = Account::new(1, 0, &key);

        db.store(0, &[(&key, &account0)]);

        let account1 = Account::new(0, 0, &key);
        db.store(1, &[(&key, &account1)]);
        db.add_root(0);

        let ancestors = vec![(1, 1)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);

        let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
    }

    #[test]
    fn test_accountsdb_root_one_slot() {
        solana_logger::setup();
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);

        let key = Pubkey::default();
        let account0 = Account::new(1, 0, &key);

        // store value 1 in the "root", i.e. db zero
        db.store(0, &[(&key, &account0)]);

        // now we have:
        //
        //                       root0 -> key.lamports==1
        //                        / \
        //                       /   \
        //  key.lamports==0 <- slot1    \
        //                             slot2 -> key.lamports==1
        //                                       (via root0)

        // store value 0 in one child
        let account1 = Account::new(0, 0, &key);
        db.store(1, &[(&key, &account1)]);

        // masking accounts is done at the Accounts level, at accountsDB we see
        // original account (but could also accept "None", which is implemented
        // at the Accounts level)
        let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);

        // we should see 1 token in slot 2
        let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
        assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account0);

        db.add_root(0);

        let ancestors = vec![(1, 1)].into_iter().collect();
        assert_eq!(db.load_slow(&ancestors, &key), Some((account1, 1)));
        let ancestors = vec![(2, 2)].into_iter().collect();
        assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0))); // original value
    }

    // 100 stored accounts must all stay loadable before and after rooting slot 0.
    #[test]
    fn test_accountsdb_add_root_many() {
        let db = AccountsDB::new(Vec::new(),
            &ClusterType::Development);

        let mut pubkeys: Vec<Pubkey> = vec![];
        create_account(&db, &mut pubkeys, 0, 100, 0, 0);
        for _ in 1..100 {
            let idx = thread_rng().gen_range(0, 99);
            let ancestors = vec![(0, 0)].into_iter().collect();
            let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
            // create_account gives pubkeys[i] a balance of i+1 lamports.
            let mut default_account = Account::default();
            default_account.lamports = (idx + 1) as u64;
            assert_eq!((default_account, 0), account);
        }

        db.add_root(0);

        // check that all the accounts appear with a new root
        for _ in 1..100 {
            let idx = thread_rng().gen_range(0, 99);
            let ancestors = vec![(0, 0)].into_iter().collect();
            let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
            let ancestors = vec![(1, 1)].into_iter().collect();
            let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
            let mut default_account = Account::default();
            default_account.lamports = (idx + 1) as u64;
            assert_eq!(&default_account, &account0.0);
            assert_eq!(&default_account, &account1.0);
        }
    }

    // Verifies per-store account counts across stores, rooting, and an overwrite
    // that should only decrement the overwritten slot's count.
    #[test]
    fn test_accountsdb_count_stores() {
        solana_logger::setup();

        let db = AccountsDB::new_single();
        let mut pubkeys: Vec<Pubkey> = vec![];
        create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
        assert!(check_storage(&db, 0, 2));

        let pubkey = Pubkey::new_rand();
        let account = Account::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
        db.store(1, &[(&pubkey, &account)]);
        db.store(1, &[(&pubkeys[0], &account)]);
        {
            let stores = db.storage.read().unwrap();
            let slot_0_stores = &stores.0.get(&0).unwrap();
            let slot_1_stores = &stores.0.get(&1).unwrap();
            assert_eq!(slot_0_stores.len(), 1);
            assert_eq!(slot_1_stores.len(), 1);
            assert_eq!(slot_0_stores[&0].count(), 2);
            assert_eq!(slot_1_stores[&1].count(), 2);
            assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
            assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
        }

        // adding root doesn't change anything
        db.add_root(1);
        {
            let stores = db.storage.read().unwrap();
            let slot_0_stores = &stores.0.get(&0).unwrap();
            let slot_1_stores = &stores.0.get(&1).unwrap();
            assert_eq!(slot_0_stores.len(), 1);
            assert_eq!(slot_1_stores.len(), 1);
            assert_eq!(slot_0_stores[&0].count(), 2);
            assert_eq!(slot_1_stores[&1].count(), 2);
            assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
            assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
        }

        // overwrite old rooted account version; only the slot_0_stores.count() should be
        // decremented
        db.store(2, &[(&pubkeys[0], &account)]);
        {
            let stores = db.storage.read().unwrap();
            let slot_0_stores = &stores.0.get(&0).unwrap();
            let slot_1_stores = &stores.0.get(&1).unwrap();
            assert_eq!(slot_0_stores.len(), 1);
            assert_eq!(slot_1_stores.len(), 1);
            assert_eq!(slot_0_stores[&0].count(), 1);
            assert_eq!(slot_1_stores[&1].count(), 2);
            assert_eq!(slot_0_stores[&0].approx_stored_count(), 2);
            assert_eq!(slot_1_stores[&1].approx_stored_count(), 2);
        }
    }

    #[test]
    fn test_accounts_unsquashed() {
        let key = Pubkey::default();

        // 1 token in the "root", i.e. db zero
        let db0 = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let account0 = Account::new(1, 0, &key);
        db0.store(0, &[(&key, &account0)]);

        // 0 lamports in the child
        let account1 = Account::new(0, 0, &key);
        db0.store(1, &[(&key, &account1)]);

        // masking accounts is done at the Accounts level, at accountsDB we see
        // original account
        let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
        assert_eq!(db0.load_slow(&ancestors, &key), Some((account1, 1)));
        let ancestors = vec![(0, 0)].into_iter().collect();
        assert_eq!(db0.load_slow(&ancestors, &key), Some((account0, 0)));
    }

    // Removing an unrooted slot must erase it from index, storage, and bank
    // hashes, and a subsequent store at the same slot must work fresh.
    #[test]
    fn test_remove_unrooted_slot() {
        let unrooted_slot = 9;
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let key = Pubkey::default();
        let account0 = Account::new(1, 0, &key);
        let ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect();
        db.store(unrooted_slot, &[(&key, &account0)]);
        db.bank_hashes
            .write()
            .unwrap()
            .insert(unrooted_slot, BankHashInfo::default());
        assert!(db
            .accounts_index
            .read()
            .unwrap()
            .get(&key, Some(&ancestors))
            .is_some());
        assert_load_account(&db, unrooted_slot, key, 1);

        // Purge the slot
        db.remove_unrooted_slot(unrooted_slot);
        assert!(db.load_slow(&ancestors, &key).is_none());
        assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none());
        assert!(db.storage.read().unwrap().0.get(&unrooted_slot).is_none());
        assert!(db
            .accounts_index
            .read()
            .unwrap()
            .account_maps
            .get(&key)
            .map(|pubkey_entry| pubkey_entry.1.read().unwrap().is_empty())
            .unwrap_or(true));
        assert!(db
            .accounts_index
            .read()
            .unwrap()
            .get(&key, Some(&ancestors))
            .is_none());

        // Test we can store for the same slot again and get the right information
        let account0 = Account::new(2, 0, &key);
        db.store(unrooted_slot, &[(&key, &account0)]);
        assert_load_account(&db, unrooted_slot, key, 2);
    }

    // A purged unrooted slot must stay gone across snapshot round-trips.
    #[test]
    fn test_remove_unrooted_slot_snapshot() {
        let unrooted_slot = 9;
        let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
        let key = Pubkey::new_rand();
        let account0 = Account::new(1, 0, &key);
        db.store(unrooted_slot, &[(&key, &account0)]);

        // Purge the slot
        db.remove_unrooted_slot(unrooted_slot);

        // Add a new root
        let key2 = Pubkey::new_rand();
        let new_root = unrooted_slot + 1;
        db.store(new_root, &[(&key2, &account0)]);
        db.add_root(new_root);

        // Simulate reconstruction from snapshot
        let db = reconstruct_accounts_db_via_serialization(&db, new_root);

        // Check root account exists
        assert_load_account(&db, new_root, key2, 1);

        // Check purged account stays gone
        let unrooted_slot_ancestors: HashMap<_, _> =
            vec![(unrooted_slot, 1)].into_iter().collect();
        assert!(db.load_slow(&unrooted_slot_ancestors, &key).is_none());
    }

    // Test helper: stores `num` system accounts (lamports = index+1) plus
    // `num_vote` vote accounts into `slot`, recording their pubkeys.
    fn create_account(
        accounts: &AccountsDB,
        pubkeys: &mut Vec<Pubkey>,
        slot: Slot,
        num: usize,
        space: usize,
        num_vote: usize,
    ) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        for t in 0..num {
            let pubkey = Pubkey::new_rand();
            let account = Account::new((t + 1) as u64, space, &Account::default().owner);
            pubkeys.push(pubkey);
            assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
            accounts.store(slot, &[(&pubkey, &account)]);
        }
        for t in 0..num_vote {
            let pubkey = Pubkey::new_rand();
            let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id());
            pubkeys.push(pubkey);
            let ancestors = vec![(slot, 0)].into_iter().collect();
            assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
            accounts.store(slot, &[(&pubkey, &account)]);
        }
    }

    // Test helper: randomly bumps balances of existing accounts and re-stores
    // them, checking each round-trip.
    fn update_accounts(accounts: &AccountsDB, pubkeys: &[Pubkey], slot: Slot, range: usize) {
        for _ in 1..1000 {
            let idx = thread_rng().gen_range(0, range);
            let ancestors = vec![(slot, 0)].into_iter().collect();
            if let Some((mut account, _)) = accounts.load_slow(&ancestors, &pubkeys[idx]) {
                account.lamports += 1;
                accounts.store(slot, &[(&pubkeys[idx], &account)]);
                if account.lamports == 0 {
                    let ancestors = vec![(slot, 0)].into_iter().collect();
                    assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none());
                } else {
                    let mut default_account = Account::default();
                    default_account.lamports = account.lamports;
                    assert_eq!(default_account, account);
                }
            }
        }
    }

    // Test helper: asserts `slot` has exactly one Available store holding `count`
    // accounts, and that approximate/actual stored counts agree.
    fn check_storage(accounts: &AccountsDB, slot: Slot, count: usize) -> bool {
        let storage = accounts.storage.read().unwrap();
        assert_eq!(storage.0[&slot].len(), 1);
        let slot_storage = storage.0.get(&slot).unwrap();
        let mut total_count: usize = 0;
        for store in slot_storage.values() {
            assert_eq!(store.status(), AccountStorageStatus::Available);
            total_count += store.count();
        }
        assert_eq!(total_count, count);
        let (expected_store_count, actual_store_count): (usize, usize) = (
            slot_storage.values().map(|s| s.approx_stored_count()).sum(),
            slot_storage
                .values()
                .map(|s| s.accounts.accounts(0).len())
                .sum(),
        );
        assert_eq!(expected_store_count, actual_store_count);
        total_count == count
    }

    // Test helper: spot-checks random accounts against the lamports pattern
    // (index + count) used by create_account/modify_accounts.
    fn check_accounts(
        accounts: &AccountsDB,
        pubkeys: &[Pubkey],
        slot: Slot,
        num: usize,
        count: usize,
    ) {
        let ancestors = vec![(slot, 0)].into_iter().collect();
        for _ in 0..num {
            let idx = thread_rng().gen_range(0, num);
let account = accounts.load_slow(&ancestors, &pubkeys[idx]); let account1 = Some(( Account::new((idx + count) as u64, 0, &Account::default().owner), slot, )); assert_eq!(account, account1); } } #[allow(clippy::needless_range_loop)] fn modify_accounts( accounts: &AccountsDB, pubkeys: &[Pubkey], slot: Slot, num: usize, count: usize, ) { for idx in 0..num { let account = Account::new((idx + count) as u64, 0, &Account::default().owner); accounts.store(slot, &[(&pubkeys[idx], &account)]); } } #[test] fn test_account_one() { let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap(); let db = AccountsDB::new(paths, &ClusterType::Development); let mut pubkeys: Vec<Pubkey> = vec![]; create_account(&db, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); let account = db.load_slow(&ancestors, &pubkeys[0]).unwrap(); let mut default_account = Account::default(); default_account.lamports = 1; assert_eq!((default_account, 0), account); } #[test] fn test_account_many() { let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap(); let db = AccountsDB::new(paths, &ClusterType::Development); let mut pubkeys: Vec<Pubkey> = vec![]; create_account(&db, &mut pubkeys, 0, 100, 0, 0); check_accounts(&db, &pubkeys, 0, 100, 1); } #[test] fn test_account_update() { let accounts = AccountsDB::new_single(); let mut pubkeys: Vec<Pubkey> = vec![]; create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); update_accounts(&accounts, &pubkeys, 0, 99); assert_eq!(check_storage(&accounts, 0, 100), true); } #[test] fn test_account_grow_many() { let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap(); let size = 4096; let accounts = AccountsDB::new_sized(paths, size); let mut keys = vec![]; for i in 0..9 { let key = Pubkey::new_rand(); let account = Account::new(i + 1, size as usize / 4, &key); accounts.store(0, &[(&key, &account)]); keys.push(key); } let ancestors = vec![(0, 0)].into_iter().collect(); for (i, key) in keys.iter().enumerate() { assert_eq!( 
accounts.load_slow(&ancestors, &key).unwrap().0.lamports, (i as u64) + 1 ); } let mut append_vec_histogram = HashMap::new(); for storage in accounts .storage .read() .unwrap() .0 .values() .flat_map(|x| x.values()) { *append_vec_histogram.entry(storage.slot).or_insert(0) += 1; } for count in append_vec_histogram.values() { assert!(*count >= 2); } } #[test] fn test_account_grow() { let accounts = AccountsDB::new_single(); let count = [0, 1]; let status = [AccountStorageStatus::Available, AccountStorageStatus::Full]; let pubkey1 = Pubkey::new_rand(); let account1 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1); accounts.store(0, &[(&pubkey1, &account1)]); { let stores = accounts.storage.read().unwrap(); assert_eq!(stores.0.len(), 1); assert_eq!(stores.0[&0][&0].count(), 1); assert_eq!(stores.0[&0][&0].status(), AccountStorageStatus::Available); } let pubkey2 = Pubkey::new_rand(); let account2 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2); accounts.store(0, &[(&pubkey2, &account2)]); { let stores = accounts.storage.read().unwrap(); assert_eq!(stores.0.len(), 1); assert_eq!(stores.0[&0].len(), 2); assert_eq!(stores.0[&0][&0].count(), 1); assert_eq!(stores.0[&0][&0].status(), AccountStorageStatus::Full); assert_eq!(stores.0[&0][&1].count(), 1); assert_eq!(stores.0[&0][&1].status(), AccountStorageStatus::Available); } let ancestors = vec![(0, 0)].into_iter().collect(); assert_eq!( accounts.load_slow(&ancestors, &pubkey1).unwrap().0, account1 ); assert_eq!( accounts.load_slow(&ancestors, &pubkey2).unwrap().0, account2 ); // lots of stores, but 3 storages should be enough for everything for i in 0..25 { let index = i % 2; accounts.store(0, &[(&pubkey1, &account1)]); { let stores = accounts.storage.read().unwrap(); assert_eq!(stores.0.len(), 1); assert_eq!(stores.0[&0].len(), 3); assert_eq!(stores.0[&0][&0].count(), count[index]); assert_eq!(stores.0[&0][&0].status(), status[0]); assert_eq!(stores.0[&0][&1].count(), 1); 
assert_eq!(stores.0[&0][&1].status(), status[1]); assert_eq!(stores.0[&0][&2].count(), count[index ^ 1]); assert_eq!(stores.0[&0][&2].status(), status[0]); } let ancestors = vec![(0, 0)].into_iter().collect(); assert_eq!( accounts.load_slow(&ancestors, &pubkey1).unwrap().0, account1 ); assert_eq!( accounts.load_slow(&ancestors, &pubkey2).unwrap().0, account2 ); } } #[test] fn test_purge_slot_not_root() { let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let mut pubkeys: Vec<Pubkey> = vec![]; create_account(&accounts, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_some()); accounts.purge_slot(0); assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_none()); } #[test] fn test_purge_slot_after_root() { let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let mut pubkeys: Vec<Pubkey> = vec![]; create_account(&accounts, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); accounts.add_root(0); accounts.purge_slot(0); assert!(accounts.load_slow(&ancestors, &pubkeys[0]).is_some()); } #[test] fn test_lazy_gc_slot() { solana_logger::setup(); //This test is pedantic //A slot is purged when a non root bank is cleaned up. If a slot is behind root but it is //not root, it means we are retaining dead banks. 
// NOTE(review): this span begins mid-`test_lazy_gc_slot` (its `#[test]` header
// is on the previous line) and ends mid-`test_clean_old_with_normal_account`;
// only formatting and comments are changed — every code token is preserved.
    let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development);
    let pubkey = Pubkey::new_rand();
    let account = Account::new(1, 0, &Account::default().owner);
    //store an account
    accounts.store(0, &[(&pubkey, &account)]);
    let ancestors = vec![(0, 0)].into_iter().collect();
    // Remember which append-vec (store id) holds the slot-0 version.
    let id = {
        let index = accounts.accounts_index.read().unwrap();
        let (list, idx) = index.get(&pubkey, Some(&ancestors)).unwrap();
        list[idx].1.store_id
    };
    accounts.add_root(1);

    //slot is still there, since gc is lazy
    assert!(accounts.storage.read().unwrap().0[&0].get(&id).is_some());

    //store causes clean
    accounts.store(1, &[(&pubkey, &account)]);

    //slot is gone
    accounts.print_accounts_stats("pre-clean");
    accounts.clean_accounts();
    accounts.process_dead_slots(None);
    assert!(accounts.storage.read().unwrap().0.get(&0).is_none());

    //new value is there
    let ancestors = vec![(1, 1)].into_iter().collect();
    assert_eq!(accounts.load_slow(&ancestors, &pubkey), Some((account, 1)));
}

// Test-only introspection helpers on AccountsDB.
impl AccountsDB {
    // Sum of live-account counts across all stores for `slot`; 0 when the
    // slot has no storage.
    fn alive_account_count_in_store(&self, slot: Slot) -> usize {
        let storage = self.storage.read().unwrap();
        let slot_storage = storage.0.get(&slot);
        if let Some(slot_storage) = slot_storage {
            slot_storage.values().map(|store| store.count()).sum()
        } else {
            0
        }
    }

    // Total accounts (live or dead) recorded in `slot`'s append-vecs; also
    // asserts the approximate stored count stays in sync with reality.
    fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
        let storage = self.storage.read().unwrap();
        let slot_storage = storage.0.get(&slot);
        if let Some(slot_storage) = slot_storage {
            let count = slot_storage
                .values()
                .map(|store| store.accounts.accounts(0).len())
                .sum();
            let stored_count: usize = slot_storage
                .values()
                .map(|store| store.approx_stored_count())
                .sum();
            assert_eq!(stored_count, count);
            count
        } else {
            0
        }
    }

    // Storage reference count for `pubkey` as seen by the accounts index.
    fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
        self.accounts_index
            .read()
            .unwrap()
            .ref_count_from_storage(&pubkey)
    }

    // Number of roots that clean_accounts() has not yet processed.
    fn uncleaned_root_count(&self) -> usize {
        self.accounts_index.read().unwrap().uncleaned_roots.len()
    }
}

#[test]
fn test_clean_old_with_normal_account() {
    solana_logger::setup();
    let accounts =
AccountsDB::new(Vec::new(), &ClusterType::Development); let pubkey = Pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account accounts.store(0, &[(&pubkey, &account)]); accounts.store(1, &[(&pubkey, &account)]); // simulate slots are rooted after while accounts.add_root(0); accounts.add_root(1); //even if rooted, old state isn't cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 1); assert_eq!(accounts.alive_account_count_in_store(1), 1); accounts.clean_accounts(); //now old state is cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 0); assert_eq!(accounts.alive_account_count_in_store(1), 1); } #[test] fn test_clean_old_with_zero_lamport_account() { solana_logger::setup(); let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let pubkey1 = Pubkey::new_rand(); let pubkey2 = Pubkey::new_rand(); let normal_account = Account::new(1, 0, &Account::default().owner); let zero_account = Account::new(0, 0, &Account::default().owner); //store an account accounts.store(0, &[(&pubkey1, &normal_account)]); accounts.store(1, &[(&pubkey1, &zero_account)]); accounts.store(0, &[(&pubkey2, &normal_account)]); accounts.store(1, &[(&pubkey2, &normal_account)]); //simulate slots are rooted after while accounts.add_root(0); accounts.add_root(1); //even if rooted, old state isn't cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 2); assert_eq!(accounts.alive_account_count_in_store(1), 2); accounts.clean_accounts(); //still old state behind zero-lamport account isn't cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 1); assert_eq!(accounts.alive_account_count_in_store(1), 2); } #[test] fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let pubkey1 = Pubkey::new_rand(); let pubkey2 = Pubkey::new_rand(); let normal_account = Account::new(1, 0, &Account::default().owner); 
let zero_account = Account::new(0, 0, &Account::default().owner); //store an account accounts.store(0, &[(&pubkey1, &normal_account)]); accounts.store(1, &[(&pubkey1, &zero_account)]); accounts.store(0, &[(&pubkey2, &normal_account)]); accounts.store(2, &[(&pubkey2, &normal_account)]); //simulate slots are rooted after while accounts.add_root(0); accounts.add_root(1); accounts.add_root(2); //even if rooted, old state isn't cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 2); assert_eq!(accounts.alive_account_count_in_store(1), 1); assert_eq!(accounts.alive_account_count_in_store(2), 1); accounts.clean_accounts(); //both zero lamport and normal accounts are cleaned up assert_eq!(accounts.alive_account_count_in_store(0), 0); assert_eq!(accounts.alive_account_count_in_store(1), 0); assert_eq!(accounts.alive_account_count_in_store(2), 1); } #[test] fn test_uncleaned_roots_with_account() { solana_logger::setup(); let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let pubkey = Pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account accounts.store(0, &[(&pubkey, &account)]); assert_eq!(accounts.uncleaned_root_count(), 0); // simulate slots are rooted after while accounts.add_root(0); assert_eq!(accounts.uncleaned_root_count(), 1); //now uncleaned roots are cleaned up accounts.clean_accounts(); assert_eq!(accounts.uncleaned_root_count(), 0); } #[test] fn test_uncleaned_roots_with_no_account() { solana_logger::setup(); let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); assert_eq!(accounts.uncleaned_root_count(), 0); // simulate slots are rooted after while accounts.add_root(0); assert_eq!(accounts.uncleaned_root_count(), 1); //now uncleaned roots are cleaned up accounts.clean_accounts(); assert_eq!(accounts.uncleaned_root_count(), 0); } #[test] fn test_accounts_db_serialize1() { solana_logger::setup(); let accounts = AccountsDB::new_single(); let mut pubkeys: Vec<Pubkey> = 
vec![]; // Create 100 accounts in slot 0 create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); assert_eq!(check_storage(&accounts, 0, 100), true); check_accounts(&accounts, &pubkeys, 0, 100, 1); // do some updates to those accounts and re-check modify_accounts(&accounts, &pubkeys, 0, 100, 2); check_accounts(&accounts, &pubkeys, 0, 100, 2); accounts.add_root(0); let mut pubkeys1: Vec<Pubkey> = vec![]; let latest_slot = 1; // Modify the first 10 of the slot 0 accounts as updates in slot 1 modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3); // Create 10 new accounts in slot 1 create_account(&accounts, &mut pubkeys1, latest_slot, 10, 0, 0); // Store a lamports=0 account in slot 1 let account = Account::new(0, 0, &Account::default().owner); accounts.store(latest_slot, &[(&pubkeys[30], &account)]); accounts.add_root(latest_slot); info!("added root 1"); let latest_slot = 2; let mut pubkeys2: Vec<Pubkey> = vec![]; // Modify original slot 0 accounts in slot 2 modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4); // Create 10 new accounts in slot 2 create_account(&accounts, &mut pubkeys2, latest_slot, 10, 0, 0); // Store a lamports=0 account in slot 2 let account = Account::new(0, 0, &Account::default().owner); accounts.store(latest_slot, &[(&pubkeys[31], &account)]); accounts.add_root(latest_slot); assert!(check_storage(&accounts, 0, 90)); assert!(check_storage(&accounts, 1, 21)); assert!(check_storage(&accounts, 2, 31)); let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot); assert_eq!( daccounts.write_version.load(Ordering::Relaxed), accounts.write_version.load(Ordering::Relaxed) ); assert_eq!( daccounts.next_id.load(Ordering::Relaxed), accounts.next_id.load(Ordering::Relaxed) ); // Get the hash for the latest slot, which should be the only hash in the // bank_hashes map on the deserialized AccountsDb assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2); assert_eq!( daccounts.bank_hashes.read().unwrap().get(&latest_slot), 
// NOTE(review): this span begins mid-`test_accounts_db_serialize1` and ends
// mid-`test_accounts_db_purge_keep_live`; only formatting and comments are
// changed — every code token is preserved.
        accounts.bank_hashes.read().unwrap().get(&latest_slot)
    );
    daccounts.print_count_and_status("daccounts");

    // Don't check the first 35 accounts which have not been modified on slot 0
    check_accounts(&daccounts, &pubkeys[35..], 0, 65, 37);
    check_accounts(&daccounts, &pubkeys1, 1, 10, 1);
    assert!(check_storage(&daccounts, 0, 100));
    assert!(check_storage(&daccounts, 1, 21));
    assert!(check_storage(&daccounts, 2, 31));

    // The reconstructed db must produce the same accounts hash as the source.
    let ancestors = linear_ancestors(latest_slot);
    assert_eq!(
        daccounts.update_accounts_hash(latest_slot, &ancestors),
        accounts.update_accounts_hash(latest_slot, &ancestors)
    );
}

/// Asserts that `pubkey` loads from `slot` with exactly `expected_lamports`.
fn assert_load_account(
    accounts: &AccountsDB,
    slot: Slot,
    pubkey: Pubkey,
    expected_lamports: u64,
) {
    let ancestors = vec![(slot, 0)].into_iter().collect();
    let (account, slot) = accounts.load_slow(&ancestors, &pubkey).unwrap();
    assert_eq!((account.lamports, slot), (expected_lamports, slot));
}

/// Asserts that `pubkey` is not loadable from `slot`.
fn assert_not_load_account(accounts: &AccountsDB, slot: Slot, pubkey: Pubkey) {
    let ancestors = vec![(slot, 0)].into_iter().collect();
    assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
}

/// Round-trips the db through snapshot serialization and returns the rebuilt
/// AccountsDB; prints its status for test diagnostics.
fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDB, slot: Slot) -> AccountsDB {
    let daccounts =
        crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot);
    daccounts.print_count_and_status("daccounts");
    daccounts
}

/// Asserts `slot` has no storage entries (either absent or an empty map).
fn assert_no_stores(accounts: &AccountsDB, slot: Slot) {
    let stores = accounts.storage.read().unwrap();
    info!("{:?}", stores.0.get(&slot));
    assert!(stores.0.get(&slot).is_none() || stores.0.get(&slot).unwrap().is_empty());
}

#[test]
fn test_accounts_db_purge_keep_live() {
    solana_logger::setup();
    let some_lamport = 223;
    let zero_lamport = 0;
    let no_data = 0;
    let owner = Account::default().owner;

    let account = Account::new(some_lamport, no_data, &owner);
    let pubkey = Pubkey::new_rand();

    let account2 = Account::new(some_lamport, no_data, &owner);
    let pubkey2 = Pubkey::new_rand();

    let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);

    let
accounts = AccountsDB::new_single(); accounts.add_root(0); let mut current_slot = 1; accounts.store(current_slot, &[(&pubkey, &account)]); // Store another live account to slot 1 which will prevent any purge // since the store count will not be zero accounts.store(current_slot, &[(&pubkey2, &account2)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.add_root(current_slot); assert_load_account(&accounts, current_slot, pubkey, zero_lamport); current_slot += 1; accounts.add_root(current_slot); accounts.print_accounts_stats("pre_purge"); accounts.clean_accounts(); accounts.print_accounts_stats("post_purge"); // Make sure the index is not touched assert_eq!( accounts .accounts_index .read() .unwrap() .account_maps .get(&pubkey) .unwrap() .1 .read() .unwrap() .len(), 2 ); // slot 1 & 2 should have stores check_storage(&accounts, 1, 2); check_storage(&accounts, 2, 1); } #[test] fn test_accounts_db_purge1() { solana_logger::setup(); let some_lamport = 223; let zero_lamport = 0; let no_data = 0; let owner = Account::default().owner; let account = Account::new(some_lamport, no_data, &owner); let pubkey = Pubkey::new_rand(); let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); let accounts = AccountsDB::new_single(); accounts.add_root(0); let mut current_slot = 1; accounts.set_hash(current_slot, current_slot - 1); accounts.store(current_slot, &[(&pubkey, &account)]); accounts.add_root(current_slot); current_slot += 1; accounts.set_hash(current_slot, current_slot - 1); accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.add_root(current_slot); assert_load_account(&accounts, current_slot, pubkey, zero_lamport); // Otherwise slot 2 will not be removed current_slot += 1; accounts.set_hash(current_slot, current_slot - 1); accounts.add_root(current_slot); accounts.print_accounts_stats("pre_purge"); let ancestors = linear_ancestors(current_slot); 
info!("ancestors: {:?}", ancestors); let hash = accounts.update_accounts_hash(current_slot, &ancestors); accounts.clean_accounts(); accounts.process_dead_slots(None); assert_eq!( accounts.update_accounts_hash(current_slot, &ancestors), hash ); accounts.print_accounts_stats("post_purge"); // Make sure the index is for pubkey cleared assert!(accounts .accounts_index .read() .unwrap() .account_maps .get(&pubkey) .is_none()); // slot 1 & 2 should not have any stores assert_no_stores(&accounts, 1); assert_no_stores(&accounts, 2); } #[test] fn test_accounts_db_serialize_zero_and_free() { solana_logger::setup(); let some_lamport = 223; let zero_lamport = 0; let no_data = 0; let owner = Account::default().owner; let account = Account::new(some_lamport, no_data, &owner); let pubkey = Pubkey::new_rand(); let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); let account2 = Account::new(some_lamport + 1, no_data, &owner); let pubkey2 = Pubkey::new_rand(); let filler_account = Account::new(some_lamport, no_data, &owner); let filler_account_pubkey = Pubkey::new_rand(); let accounts = AccountsDB::new_single(); let mut current_slot = 1; accounts.store(current_slot, &[(&pubkey, &account)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.store(current_slot, &[(&pubkey2, &account2)]); // Store enough accounts such that an additional store for slot 2 is created. 
while accounts .storage .read() .unwrap() .0 .get(&current_slot) .unwrap() .len() < 2 { accounts.store(current_slot, &[(&filler_account_pubkey, &filler_account)]); } accounts.add_root(current_slot); assert_load_account(&accounts, current_slot, pubkey, zero_lamport); accounts.print_accounts_stats("accounts"); accounts.clean_accounts(); accounts.print_accounts_stats("accounts_post_purge"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_accounts_stats("reconstructed"); assert_load_account(&accounts, current_slot, pubkey, zero_lamport); } fn with_chained_zero_lamport_accounts<F>(f: F) where F: Fn(AccountsDB, Slot) -> AccountsDB, { let some_lamport = 223; let zero_lamport = 0; let dummy_lamport = 999; let no_data = 0; let owner = Account::default().owner; let account = Account::new(some_lamport, no_data, &owner); let account2 = Account::new(some_lamport + 100_001, no_data, &owner); let account3 = Account::new(some_lamport + 100_002, no_data, &owner); let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); let pubkey = Pubkey::new_rand(); let purged_pubkey1 = Pubkey::new_rand(); let purged_pubkey2 = Pubkey::new_rand(); let dummy_account = Account::new(dummy_lamport, no_data, &owner); let dummy_pubkey = Pubkey::default(); let accounts = AccountsDB::new_single(); let mut current_slot = 1; accounts.store(current_slot, &[(&pubkey, &account)]); accounts.store(current_slot, &[(&purged_pubkey1, &account2)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]); accounts.store(current_slot, &[(&purged_pubkey2, &account3)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]); accounts.add_root(current_slot); accounts.print_accounts_stats("pre_f"); 
accounts.update_accounts_hash(4, &HashMap::default()); let accounts = f(accounts, current_slot); accounts.print_accounts_stats("post_f"); assert_load_account(&accounts, current_slot, pubkey, some_lamport); assert_load_account(&accounts, current_slot, purged_pubkey1, 0); assert_load_account(&accounts, current_slot, purged_pubkey2, 0); assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport); accounts .verify_bank_hash_and_lamports(4, &HashMap::default(), 1222) .unwrap(); } #[test] fn test_accounts_purge_chained_purge_before_snapshot_restore() { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { accounts.clean_accounts(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } #[test] fn test_accounts_purge_chained_purge_after_snapshot_restore() { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_accounts_stats("after_reconstruct"); accounts.clean_accounts(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } #[test] #[ignore] fn test_store_account_stress() { let slot = 42; let num_threads = 2; let min_file_bytes = std::mem::size_of::<StoredMeta>() + std::mem::size_of::<crate::append_vec::AccountMeta>(); let db = Arc::new(AccountsDB::new_sized(Vec::new(), min_file_bytes as u64)); db.add_root(slot); let thread_hdls: Vec<_> = (0..num_threads) .map(|_| { let db = db.clone(); std::thread::Builder::new() .name("account-writers".to_string()) .spawn(move || { let pubkey = Pubkey::new_rand(); let mut account = Account::new(1, 0, &pubkey); let mut i = 0; loop { let account_bal = thread_rng().gen_range(1, 99); account.lamports = account_bal; db.store(slot, &[(&pubkey, &account)]); let (account, slot) = db.load_slow(&HashMap::new(), &pubkey).unwrap_or_else(|| { panic!("Could not fetch stored account {}, iter {}", pubkey, i) }); assert_eq!(slot, slot); 
assert_eq!(account.lamports, account_bal); i += 1; } }) .unwrap() }) .collect(); for t in thread_hdls { t.join().unwrap(); } } #[test] fn test_accountsdb_scan_accounts() { solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let key0 = Pubkey::new_rand(); let account0 = Account::new(1, 0, &key); db.store(0, &[(&key0, &account0)]); let key1 = Pubkey::new_rand(); let account1 = Account::new(2, 0, &key); db.store(1, &[(&key1, &account1)]); let ancestors = vec![(0, 0)].into_iter().collect(); let accounts: Vec<Account> = db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| { if let Some(data) = option { accounts.push(data.1); } }); assert_eq!(accounts, vec![account0]); let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); let accounts: Vec<Account> = db.scan_accounts(&ancestors, |accounts: &mut Vec<Account>, option| { if let Some(data) = option { accounts.push(data.1); } }); assert_eq!(accounts.len(), 2); } #[test] fn test_cleanup_key_not_removed() { solana_logger::setup(); let db = AccountsDB::new_single(); let key = Pubkey::default(); let key0 = Pubkey::new_rand(); let account0 = Account::new(1, 0, &key); db.store(0, &[(&key0, &account0)]); let key1 = Pubkey::new_rand(); let account1 = Account::new(2, 0, &key); db.store(1, &[(&key1, &account1)]); db.print_accounts_stats("pre"); let slots: HashSet<Slot> = HashSet::from_iter(vec![1].into_iter()); let purge_keys = vec![(key1, slots)]; let (_reclaims, dead_keys) = db.purge_keys_exact(purge_keys); let account2 = Account::new(3, 0, &key); db.store(2, &[(&key1, &account2)]); db.handle_dead_keys(dead_keys); db.print_accounts_stats("post"); let ancestors = vec![(2, 0)].into_iter().collect(); assert_eq!(db.load_slow(&ancestors, &key1).unwrap().0.lamports, 3); } #[test] fn test_store_large_account() { solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let data_len = 
// NOTE(review): this span begins mid-`test_store_large_account` (the
// `let data_len =` binding is on the previous line) and ends mid-
// `test_frozen_account_lamport_increase`; only formatting and comments are
// changed — every code token is preserved.
        DEFAULT_FILE_SIZE as usize + 7;
    let account = Account::new(1, data_len, &key);

    db.store(0, &[(&key, &account)]);

    let ancestors = vec![(0, 0)].into_iter().collect();
    let ret = db.load_slow(&ancestors, &key).unwrap();
    assert_eq!(ret.0.data.len(), data_len);
}

/// Copies every append-vec backing `accounts_db`'s snapshot storages into
/// `output_dir`, keeping each file's original name.
pub fn copy_append_vecs<P: AsRef<Path>>(
    accounts_db: &AccountsDB,
    output_dir: P,
) -> IOResult<()> {
    let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value());
    for storage in storage_entries.iter().flatten() {
        let storage_path = storage.get_path();
        let output_path = output_dir.as_ref().join(
            storage_path
                .file_name()
                .expect("Invalid AppendVec file path"),
        );
        fs::copy(storage_path, output_path)?;
    }
    Ok(())
}

#[test]
fn test_hash_frozen_account_data() {
    let account = Account::new(1, 42, &Pubkey::default());

    let hash = AccountsDB::hash_frozen_account_data(&account);
    assert_ne!(hash, Hash::default()); // Better not be the default Hash

    // Lamports changes to not affect the hash
    let mut account_modified = account.clone();
    account_modified.lamports -= 1;
    assert_eq!(
        hash,
        AccountsDB::hash_frozen_account_data(&account_modified)
    );

    // Rent epoch may changes to not affect the hash
    let mut account_modified = account.clone();
    account_modified.rent_epoch += 1;
    assert_eq!(
        hash,
        AccountsDB::hash_frozen_account_data(&account_modified)
    );

    // Account data may not be modified
    let mut account_modified = account.clone();
    account_modified.data[0] = 42;
    assert_ne!(
        hash,
        AccountsDB::hash_frozen_account_data(&account_modified)
    );

    // Owner may not be modified
    let mut account_modified = account.clone();
    account_modified.owner =
        Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
    assert_ne!(
        hash,
        AccountsDB::hash_frozen_account_data(&account_modified)
    );

    // Executable may not be modified
    let mut account_modified = account;
    account_modified.executable = true;
    assert_ne!(
        hash,
        AccountsDB::hash_frozen_account_data(&account_modified)
    );
}

#[test]
fn test_frozen_account_lamport_increase() {
    let
frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store(0, &[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); // Store with no account changes is ok db.store(0, &[(&frozen_pubkey, &account)]); // Store with an increase in lamports is ok account.lamports = 2; db.store(0, &[(&frozen_pubkey, &account)]); // Store with an decrease that does not go below the frozen amount of lamports is tolerated account.lamports = 1; db.store(0, &[(&frozen_pubkey, &account)]); // A store of any value over the frozen value of '1' across different slots is also ok account.lamports = 3; db.store(1, &[(&frozen_pubkey, &account)]); account.lamports = 2; db.store(2, &[(&frozen_pubkey, &account)]); account.lamports = 1; db.store(3, &[(&frozen_pubkey, &account)]); } #[test] #[should_panic( expected = "Frozen account My11111111111111111111111111111111111111111 modified. 
Lamports decreased from 1 to 0" )] fn test_frozen_account_lamport_decrease() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store(0, &[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); // Store with a decrease below the frozen amount of lamports is not ok account.lamports -= 1; db.store(0, &[(&frozen_pubkey, &account)]); } #[test] #[should_panic( expected = "Unable to freeze an account that does not exist: My11111111111111111111111111111111111111111" )] fn test_frozen_account_nonexistent() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); } #[test] #[should_panic( expected = "Frozen account My11111111111111111111111111111111111111111 modified. 
Hash changed from 8wHcxDkjiwdrkPAsDnmNrF1UDGJFAtZzPQBSVweY3yRA to JdscGYB1uczVssmYuJusDD1Bfe6wpNeeho8XjcH8inN" )] fn test_frozen_account_data_modified() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store(0, &[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); account.data[0] = 42; db.store(0, &[(&frozen_pubkey, &account)]); } #[test] fn test_hash_stored_account() { // This test uses some UNSAFE trick to detect most of account's field // addition and deletion without changing the hash code const ACCOUNT_DATA_LEN: usize = 3; // the type of InputTuple elements must not contain references; // they should be simple scalars or data blobs type InputTuple = ( Slot, StoredMeta, AccountMeta, [u8; ACCOUNT_DATA_LEN], usize, // for StoredAccount::offset Hash, ); const INPUT_LEN: usize = std::mem::size_of::<InputTuple>(); type InputBlob = [u8; INPUT_LEN]; let mut blob: InputBlob = [0u8; INPUT_LEN]; // spray memory with decreasing counts so that, data layout can be detected. for (i, byte) in blob.iter_mut().enumerate() { *byte = (INPUT_LEN - i) as u8; } //UNSAFE: forcibly cast the special byte pattern to actual account fields. let (slot, meta, account_meta, data, offset, hash): InputTuple = unsafe { std::mem::transmute::<InputBlob, InputTuple>(blob) }; let stored_account = StoredAccount { meta: &meta, account_meta: &account_meta, data: &data, offset, hash: &hash, }; let account = stored_account.clone_account(); let expected_account_hash = Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap(); assert_eq!( AccountsDB::hash_stored_account(slot, &stored_account, &ClusterType::Development), expected_account_hash, "StoredAccount's data layout might be changed; update hashing if needed." 
); assert_eq!( AccountsDB::hash_account( slot, &account, &stored_account.meta.pubkey, &ClusterType::Development ), expected_account_hash, "Account-based hashing must be consistent with StoredAccount-based one." ); } #[test] fn test_bank_hash_stats() { solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; let account = Account::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store(some_slot, &[(&key, &account)]); let mut account = db.load_slow(&ancestors, &key).unwrap().0; account.lamports -= 1; account.executable = true; db.store(some_slot, &[(&key, &account)]); db.add_root(some_slot); let bank_hashes = db.bank_hashes.read().unwrap(); let bank_hash = bank_hashes.get(&some_slot).unwrap(); assert_eq!(bank_hash.stats.num_updated_accounts, 1); assert_eq!(bank_hash.stats.num_removed_accounts, 1); assert_eq!(bank_hash.stats.num_lamports_stored, 1); assert_eq!(bank_hash.stats.total_data_len, 2 * some_data_len as u64); assert_eq!(bank_hash.stats.num_executable_accounts, 1); } #[test] fn test_verify_bank_hash() { use BankHashVerificationError::*; solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::new_rand(); let some_data_len = 0; let some_slot: Slot = 0; let account = Account::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store(some_slot, &[(&key, &account)]); db.add_root(some_slot); db.update_accounts_hash(some_slot, &ancestors); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Ok(_) ); db.bank_hashes.write().unwrap().remove(&some_slot).unwrap(); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Err(MissingBankHash) ); let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]); let bank_hash_info = BankHashInfo { hash: some_bank_hash, snapshot_hash: Hash::new(&[0xca; 
HASH_BYTES]), stats: BankHashStats::default(), }; db.bank_hashes .write() .unwrap() .insert(some_slot, bank_hash_info); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Err(MismatchedBankHash) ); } #[test] fn test_verify_bank_capitalization() { use BankHashVerificationError::*; solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::new_rand(); let some_data_len = 0; let some_slot: Slot = 0; let account = Account::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store(some_slot, &[(&key, &account)]); db.add_root(some_slot); db.update_accounts_hash(some_slot, &ancestors); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Ok(_) ); let native_account_pubkey = Pubkey::new_rand(); db.store( some_slot, &[( &native_account_pubkey, &solana_sdk::native_loader::create_loadable_account("foo"), )], ); db.update_accounts_hash(some_slot, &ancestors); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Ok(_) ); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10), Err(MismatchedTotalLamports(expected, actual)) if expected == 1 && actual == 10 ); } #[test] fn test_verify_bank_hash_no_account() { solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.bank_hashes .write() .unwrap() .insert(some_slot, BankHashInfo::default()); db.add_root(some_slot); db.update_accounts_hash(some_slot, &ancestors); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 0), Ok(_) ); } #[test] fn test_verify_bank_hash_bad_account_hash() { use BankHashVerificationError::*; solana_logger::setup(); let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 0; let some_slot: Slot = 0; let account = Account::new(1, 
some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); let accounts = &[(&key, &account)]; // update AccountsDB's bank hash but discard real account hashes db.hash_accounts(some_slot, accounts, &ClusterType::Development); // provide bogus account hashes let some_hash = Hash::new(&[0xca; HASH_BYTES]); db.store_with_hashes(some_slot, accounts, &[some_hash]); db.add_root(some_slot); assert_matches!( db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1), Err(MismatchedAccountHash) ); } #[test] fn test_bad_bank_hash() { use solana_sdk::signature::{Keypair, Signer}; let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect(); for _ in 0..10_000 { let num_accounts = thread_rng().gen_range(0, 100); let accounts_keys: Vec<_> = (0..num_accounts) .map(|_| { let key = Keypair::new().pubkey(); let lamports = thread_rng().gen_range(0, 100); let some_data_len = thread_rng().gen_range(0, 1000); let account = Account::new(lamports, some_data_len, &key); (key, account) }) .collect(); let account_refs: Vec<_> = accounts_keys .iter() .map(|(key, account)| (key, account)) .collect(); db.store(some_slot, &account_refs); for (key, account) in &accounts_keys { assert_eq!( db.load_account_hash(&ancestors, key), AccountsDB::hash_account(some_slot, &account, &key, &ClusterType::Development) ); } } } #[test] fn test_get_snapshot_storages_empty() { let db = AccountsDB::new(Vec::new(), &ClusterType::Development); assert!(db.get_snapshot_storages(0).is_empty()); } #[test] fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() { let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); let before_slot = 0; let base_slot = before_slot + 1; let after_slot = base_slot + 1; db.add_root(base_slot); db.store(base_slot, &[(&key, &account)]); 
assert!(db.get_snapshot_storages(before_slot).is_empty()); assert_eq!(1, db.get_snapshot_storages(base_slot).len()); assert_eq!(1, db.get_snapshot_storages(after_slot).len()); } #[test] fn test_get_snapshot_storages_only_non_empty() { let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; db.store(base_slot, &[(&key, &account)]); db.storage .write() .unwrap() .0 .get_mut(&base_slot) .unwrap() .clear(); db.add_root(base_slot); assert!(db.get_snapshot_storages(after_slot).is_empty()); db.store(base_slot, &[(&key, &account)]); assert_eq!(1, db.get_snapshot_storages(after_slot).len()); } #[test] fn test_get_snapshot_storages_only_roots() { let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; db.store(base_slot, &[(&key, &account)]); assert!(db.get_snapshot_storages(after_slot).is_empty()); db.add_root(base_slot); assert_eq!(1, db.get_snapshot_storages(after_slot).len()); } #[test] fn test_get_snapshot_storages_exclude_empty() { let db = AccountsDB::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; db.store(base_slot, &[(&key, &account)]); db.add_root(base_slot); assert_eq!(1, db.get_snapshot_storages(after_slot).len()); let storage = db.storage.read().unwrap(); storage.0[&0].values().next().unwrap().remove_account(); assert!(db.get_snapshot_storages(after_slot).is_empty()); } #[test] #[should_panic(expected = "double remove of account in slot: 0/store: 0!!")] fn test_storage_remove_account_double_remove() { let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); let pubkey = Pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); accounts.store(0, &[(&pubkey, 
&account)]); let storage = accounts.storage.read().unwrap(); let storage_entry = storage.0[&0].values().next().unwrap(); storage_entry.remove_account(); storage_entry.remove_account(); } #[test] fn test_accounts_purge_long_chained_after_snapshot_restore() { solana_logger::setup(); let old_lamport = 223; let zero_lamport = 0; let no_data = 0; let owner = Account::default().owner; let account = Account::new(old_lamport, no_data, &owner); let account2 = Account::new(old_lamport + 100_001, no_data, &owner); let account3 = Account::new(old_lamport + 100_002, no_data, &owner); let dummy_account = Account::new(99_999_999, no_data, &owner); let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); let pubkey = Pubkey::new_rand(); let dummy_pubkey = Pubkey::new_rand(); let purged_pubkey1 = Pubkey::new_rand(); let purged_pubkey2 = Pubkey::new_rand(); let mut current_slot = 0; let accounts = AccountsDB::new_single(); // create intermediate updates to purged_pubkey1 so that // generate_index must add slots as root last at once current_slot += 1; accounts.store(current_slot, &[(&pubkey, &account)]); accounts.store(current_slot, &[(&purged_pubkey1, &account2)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey1, &account2)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey1, &account2)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]); accounts.store(current_slot, &[(&purged_pubkey2, &account3)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]); accounts.add_root(current_slot); current_slot += 1; accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]); accounts.add_root(current_slot); accounts.print_count_and_status("before reconstruct"); let accounts = 
reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_count_and_status("before purge zero"); accounts.clean_accounts(); accounts.print_count_and_status("after purge zero"); assert_load_account(&accounts, current_slot, pubkey, old_lamport); assert_load_account(&accounts, current_slot, purged_pubkey1, 0); assert_load_account(&accounts, current_slot, purged_pubkey2, 0); } #[test] fn test_accounts_clean_after_snapshot_restore_then_old_revives() { solana_logger::setup(); let old_lamport = 223; let zero_lamport = 0; let no_data = 0; let dummy_lamport = 999_999; let owner = Account::default().owner; let account = Account::new(old_lamport, no_data, &owner); let account2 = Account::new(old_lamport + 100_001, no_data, &owner); let account3 = Account::new(old_lamport + 100_002, no_data, &owner); let dummy_account = Account::new(dummy_lamport, no_data, &owner); let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); let pubkey1 = Pubkey::new_rand(); let pubkey2 = Pubkey::new_rand(); let dummy_pubkey = Pubkey::new_rand(); let mut current_slot = 0; let accounts = AccountsDB::new_single(); // A: Initialize AccountsDB with pubkey1 and pubkey2 current_slot += 1; accounts.store(current_slot, &[(&pubkey1, &account)]); accounts.store(current_slot, &[(&pubkey2, &account)]); accounts.add_root(current_slot); // B: Test multiple updates to pubkey1 in a single slot/storage current_slot += 1; assert_eq!(0, accounts.alive_account_count_in_store(current_slot)); assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store(current_slot, &[(&pubkey1, &account2)]); accounts.store(current_slot, &[(&pubkey1, &account2)]); assert_eq!(1, accounts.alive_account_count_in_store(current_slot)); assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); accounts.add_root(current_slot); // C: Yet more update to trigger lazy clean of step A current_slot += 1; assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store(current_slot, &[(&pubkey1, 
&account3)]); assert_eq!(4, accounts.ref_count_for_pubkey(&pubkey1)); accounts.add_root(current_slot); // D: Make pubkey1 0-lamport; also triggers clean of step B current_slot += 1; assert_eq!(4, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store(current_slot, &[(&pubkey1, &zero_lamport_account)]); accounts.process_dead_slots(None); assert_eq!( 3, /* == 4 - 2 + 1 */ accounts.ref_count_for_pubkey(&pubkey1) ); accounts.add_root(current_slot); // E: Avoid missing bank hash error current_slot += 1; accounts.store(current_slot, &[(&dummy_pubkey, &dummy_account)]); accounts.add_root(current_slot); assert_load_account(&accounts, current_slot, pubkey1, zero_lamport); assert_load_account(&accounts, current_slot, pubkey2, old_lamport); assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport); // At this point, there is no index entries for A and B // If step C and step D should be purged, snapshot restore would cause // pubkey1 to be revived as the state of step A. // So, prevent that from happening by introducing refcount accounts.clean_accounts(); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.clean_accounts(); assert_load_account(&accounts, current_slot, pubkey1, zero_lamport); assert_load_account(&accounts, current_slot, pubkey2, old_lamport); assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport); // F: Finally, make Step A cleanable current_slot += 1; accounts.store(current_slot, &[(&pubkey2, &account)]); accounts.add_root(current_slot); // Do clean accounts.clean_accounts(); // Ensure pubkey2 is cleaned from the index finally assert_not_load_account(&accounts, current_slot, pubkey1); assert_load_account(&accounts, current_slot, pubkey2, old_lamport); assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport); } #[test] fn test_clean_dead_slots_empty() { let accounts = AccountsDB::new_single(); let mut dead_slots = HashSet::new(); dead_slots.insert(10); 
accounts.clean_dead_slots(&dead_slots); } #[test] fn test_shrink_all_slots_none() { let accounts = AccountsDB::new_single(); for _ in 0..10 { assert_eq!(0, accounts.process_stale_slot()); } accounts.shrink_all_slots(); } #[test] fn test_shrink_next_slots() { let accounts = AccountsDB::new_single(); let mut current_slot = 7; assert_eq!( vec![None, None, None], (0..3) .map(|_| accounts.next_shrink_slot()) .collect::<Vec<_>>() ); accounts.add_root(current_slot); assert_eq!( vec![Some(7), Some(7), Some(7)], (0..3) .map(|_| accounts.next_shrink_slot()) .collect::<Vec<_>>() ); current_slot += 1; accounts.add_root(current_slot); let slots = (0..6) .map(|_| accounts.next_shrink_slot()) .collect::<Vec<_>>(); // Because the origin of this data is HashMap (not BTreeMap), key order is arbitrary per cycle. assert!( vec![Some(7), Some(8), Some(7), Some(8), Some(7), Some(8)] == slots || vec![Some(8), Some(7), Some(8), Some(7), Some(8), Some(7)] == slots ); } #[test] fn test_shrink_reset_uncleaned_roots() { let accounts = AccountsDB::new_single(); accounts.reset_uncleaned_roots(); assert_eq!( *accounts.shrink_candidate_slots.lock().unwrap(), vec![] as Vec<Slot> ); accounts.add_root(0); accounts.add_root(1); accounts.add_root(2); accounts.reset_uncleaned_roots(); let actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone(); assert_eq!(actual_slots, vec![] as Vec<Slot>); accounts.reset_uncleaned_roots(); let mut actual_slots = accounts.shrink_candidate_slots.lock().unwrap().clone(); actual_slots.sort(); assert_eq!(actual_slots, vec![0, 1, 2]); accounts.accounts_index.write().unwrap().roots.clear(); let mut actual_slots = (0..5) .map(|_| accounts.next_shrink_slot()) .collect::<Vec<_>>(); actual_slots.sort(); assert_eq!(actual_slots, vec![None, None, Some(0), Some(1), Some(2)],); } #[test] fn test_shrink_stale_slots_processed() { solana_logger::setup(); let accounts = AccountsDB::new_single(); let pubkey_count = 100; let pubkeys: Vec<_> = (0..pubkey_count).map(|_| 
Pubkey::new_rand()).collect(); let some_lamport = 223; let no_data = 0; let owner = Account::default().owner; let account = Account::new(some_lamport, no_data, &owner); let mut current_slot = 0; current_slot += 1; for pubkey in &pubkeys { accounts.store(current_slot, &[(&pubkey, &account)]); } let shrink_slot = current_slot; accounts.add_root(current_slot); current_slot += 1; let pubkey_count_after_shrink = 10; let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { accounts.store(current_slot, &[(&pubkey, &account)]); } accounts.add_root(current_slot); accounts.clean_accounts(); assert_eq!( pubkey_count, accounts.all_account_count_in_append_vec(shrink_slot) ); accounts.shrink_all_slots(); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_append_vec(shrink_slot) ); let no_ancestors = HashMap::default(); accounts.update_accounts_hash(current_slot, &no_ancestors); accounts .verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300) .unwrap(); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts .verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300) .unwrap(); // repeating should be no-op accounts.shrink_all_slots(); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_append_vec(shrink_slot) ); } #[test] fn test_shrink_stale_slots_skipped() { solana_logger::setup(); let accounts = AccountsDB::new_single(); let pubkey_count = 100; let pubkeys: Vec<_> = (0..pubkey_count).map(|_| Pubkey::new_rand()).collect(); let some_lamport = 223; let no_data = 0; let owner = Account::default().owner; let account = Account::new(some_lamport, no_data, &owner); let mut current_slot = 0; current_slot += 1; for pubkey in &pubkeys { accounts.store(current_slot, &[(&pubkey, &account)]); } let shrink_slot = current_slot; accounts.add_root(current_slot); current_slot += 1; let pubkey_count_after_shrink = 90; let updated_pubkeys = 
&pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { accounts.store(current_slot, &[(&pubkey, &account)]); } accounts.add_root(current_slot); accounts.clean_accounts(); assert_eq!( pubkey_count, accounts.all_account_count_in_append_vec(shrink_slot) ); // Only, try to shrink stale slots. accounts.shrink_all_stale_slots(); assert_eq!( pubkey_count, accounts.all_account_count_in_append_vec(shrink_slot) ); // Now, do full-shrink. accounts.shrink_all_slots(); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_append_vec(shrink_slot) ); } #[test] fn test_delete_dependencies() { solana_logger::setup(); let mut accounts_index = AccountsIndex::default(); let key0 = Pubkey::new_from_array([0u8; 32]); let key1 = Pubkey::new_from_array([1u8; 32]); let key2 = Pubkey::new_from_array([2u8; 32]); let info0 = AccountInfo { store_id: 0, offset: 0, lamports: 0, }; let info1 = AccountInfo { store_id: 1, offset: 0, lamports: 0, }; let info2 = AccountInfo { store_id: 2, offset: 0, lamports: 0, }; let info3 = AccountInfo { store_id: 3, offset: 0, lamports: 0, }; let mut reclaims = vec![]; accounts_index.insert(0, &key0, info0, &mut reclaims); accounts_index.insert(1, &key0, info1.clone(), &mut reclaims); accounts_index.insert(1, &key1, info1, &mut reclaims); accounts_index.insert(2, &key1, info2.clone(), &mut reclaims); accounts_index.insert(2, &key2, info2, &mut reclaims); accounts_index.insert(3, &key2, info3, &mut reclaims); accounts_index.add_root(0); accounts_index.add_root(1); accounts_index.add_root(2); accounts_index.add_root(3); let mut purges = HashMap::new(); purges.insert(key0, accounts_index.would_purge(&key0)); purges.insert(key1, accounts_index.would_purge(&key1)); purges.insert(key2, accounts_index.would_purge(&key2)); for (key, (list, ref_count)) in &purges { info!(" purge {} ref_count {} =>", key, ref_count); for x in list { info!(" {:?}", x); } } let mut store_counts = HashMap::new(); store_counts.insert(0, (0, 
HashSet::from_iter(vec![key0]))); store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1]))); store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2]))); store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); AccountsDB::calc_delete_dependencies(&purges, &mut store_counts); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); stores.sort(); for store in &stores { info!( "store: {:?} : {:?}", store, store_counts.get(&store).unwrap() ); } for x in 0..3 { assert!(store_counts[&x].0 >= 1); } } #[test] fn test_shrink_and_clean() { solana_logger::setup(); // repeat the whole test scenario for _ in 0..5 { let accounts = Arc::new(AccountsDB::new_single()); let accounts_for_shrink = accounts.clone(); // spawn the slot shrinking background thread let exit = Arc::new(AtomicBool::default()); let exit_for_shrink = exit.clone(); let shrink_thread = std::thread::spawn(move || loop { if exit_for_shrink.load(Ordering::Relaxed) { break; } accounts_for_shrink.process_stale_slot(); }); let mut alive_accounts = vec![]; let owner = Pubkey::default(); // populate the AccountsDB with plenty of food for slot shrinking // also this simulates realistic some heavy spike account updates in the wild for current_slot in 0..1000 { while alive_accounts.len() <= 10 { alive_accounts.push(( Pubkey::new_rand(), Account::new(thread_rng().gen_range(0, 50), 0, &owner), )); } alive_accounts.retain(|(_pubkey, account)| account.lamports >= 1); for (pubkey, account) in alive_accounts.iter_mut() { account.lamports -= 1; accounts.store(current_slot, &[(&pubkey, &account)]); } accounts.add_root(current_slot); } // let's dance. 
for _ in 0..10 { accounts.clean_accounts(); std::thread::sleep(std::time::Duration::from_millis(100)); } // cleanup exit.store(true, Ordering::Relaxed); shrink_thread.join().unwrap(); } } #[test] fn test_account_balance_for_capitalization_normal() { // system accounts assert_eq!( AccountsDB::account_balance_for_capitalization(10, &Pubkey::default(), false), 10 ); // any random program data accounts assert_eq!( AccountsDB::account_balance_for_capitalization(10, &Pubkey::new_rand(), false), 10 ); } #[test] fn test_account_balance_for_capitalization_sysvar() { use solana_sdk::sysvar::Sysvar; let normal_sysvar = solana_sdk::slot_history::SlotHistory::default().create_account(1); assert_eq!( AccountsDB::account_balance_for_capitalization( normal_sysvar.lamports, &normal_sysvar.owner, normal_sysvar.executable ), 0 ); // currently transactions can send any lamports to sysvars although this is not sensible. assert_eq!( AccountsDB::account_balance_for_capitalization(10, &solana_sdk::sysvar::id(), false), 9 ); } #[test] fn test_account_balance_for_capitalization_native_program() { let normal_native_program = solana_sdk::native_loader::create_loadable_account("foo"); assert_eq!( AccountsDB::account_balance_for_capitalization( normal_native_program.lamports, &normal_native_program.owner, normal_native_program.executable ), 0 ); // test maliciously assigned bogus native loader account assert_eq!( AccountsDB::account_balance_for_capitalization( 1, &solana_sdk::native_loader::id(), false ), 1 ) } #[test] fn test_checked_sum_for_capitalization_normal() { assert_eq!( AccountsDB::checked_sum_for_capitalization(vec![1, 2].into_iter()), 3 ); } #[test] #[should_panic(expected = "overflow is detected while summing capitalization")] fn test_checked_sum_for_capitalization_overflow() { assert_eq!( AccountsDB::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()), 3 ); } }
36.142982
200
0.558935
1a4c1b0eab8db1208a940b3af954c4ce253a7ba1
1,558
// This file is part of Substrate. // Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. //! Substrate CLI library. //! //! This package has two Cargo features: //! //! - `cli` (default): exposes functions that parse command-line options, then start and run the //! node as a CLI application. //! //! - `browser`: exposes the content of the `browser` module, which consists of exported symbols //! that are meant to be passed through the `wasm-bindgen` utility and called from JavaScript. //! Despite its name the produced WASM can theoretically also be used from NodeJS, although this //! hasn't been tested. #![warn(missing_docs)] pub mod chain_spec; #[macro_use] mod service; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; #[cfg(feature = "cli")] pub use cli::*; #[cfg(feature = "cli")] pub use command::*;
33.869565
96
0.724647
6add790760147b8485f0332c1510e15298389fd7
4,829
// Copyright (c) 2017 Stefan Lankes, RWTH Aachen University // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![allow(dead_code)] use consts::*; use core::mem; use x86::bits64::segmentation::*; use x86::bits64::task::*; use x86::controlregs::{cr3, cr3_write}; use x86::dtables::{self, DescriptorTablePointer}; use x86::segmentation::*; use x86::Ring; //use logging::*; use scheduler; const GDT_NULL: usize = 0; const GDT_KERNEL_CODE: usize = 1; const GDT_KERNEL_DATA: usize = 2; const GDT_USER32_CODE: usize = 3; const GDT_USER32_DATA: usize = 4; const GDT_USER64_CODE: usize = 5; const GDT_FIRST_TSS: usize = 6; // fox x86_64 is a TSS descriptor twice larger than a code/data descriptor const TSS_ENTRIES: usize = 2; const GDT_ENTRIES: usize = (GDT_FIRST_TSS + TSS_ENTRIES); /// We use IST1 through IST4. /// Each critical exception (NMI, Double Fault, Machine Check) gets a dedicated one while IST1 is shared for all other /// interrupts. See also irq.rs. const IST_ENTRIES: usize = 4; // thread_local on a static mut, signals that the value of this static may // change depending on the current thread. 
static mut GDT: [Descriptor; GDT_ENTRIES] = [Descriptor::NULL; GDT_ENTRIES]; static mut TSS: Tss = Tss::from(TaskStateSegment::new()); static IST: [u8; IST_ENTRIES * STACK_SIZE] = [0; IST_ENTRIES * STACK_SIZE]; // workaround to use the new repr(align) feature // currently, it is only supported by structs // => map all task state segments in a struct #[repr(align(128))] pub struct Tss(TaskStateSegment); impl Tss { pub const fn into(self) -> TaskStateSegment { self.0 } pub const fn from(x: TaskStateSegment) -> Self { Tss(x) } } /// This will setup the special GDT /// pointer, set up the entries in our GDT, and then /// finally to load the new GDT and to update the /// new segment registers pub fn init() { unsafe { // The NULL descriptor is always the first entry. GDT[GDT_NULL] = Descriptor::NULL; // The second entry is a 64-bit Code Segment in kernel-space (Ring 0). // All other parameters are ignored. GDT[GDT_KERNEL_CODE] = DescriptorBuilder::code_descriptor(0, 0, CodeSegmentType::ExecuteRead) .present() .dpl(Ring::Ring0) .l() .finish(); // The third entry is a 64-bit Data Segment in kernel-space (Ring 0). // All other parameters are ignored. 
GDT[GDT_KERNEL_DATA] = DescriptorBuilder::data_descriptor(0, 0, DataSegmentType::ReadWrite) .present() .dpl(Ring::Ring0) .finish(); /* * Create code segment for 32bit user-space applications (ring 3) */ GDT[GDT_USER32_CODE] = DescriptorBuilder::code_descriptor(0, 0, CodeSegmentType::ExecuteRead) .present() .dpl(Ring::Ring3) .finish(); /* * Create code segment for 32bit user-space applications (ring 3) */ GDT[GDT_USER32_DATA] = DescriptorBuilder::data_descriptor(0, 0, DataSegmentType::ReadWrite) .present() .dpl(Ring::Ring3) .finish(); /* * Create code segment for 64bit user-space applications (ring 3) */ GDT[GDT_USER64_CODE] = DescriptorBuilder::code_descriptor(0, 0, CodeSegmentType::ExecuteRead) .present() .dpl(Ring::Ring3) .l() .finish(); /* * Create TSS for each core (we use these segments for task switching) */ let base = &TSS.0 as *const _ as u64; let tss_descriptor: Descriptor64 = <DescriptorBuilder as GateDescriptorBuilder<u64>>::tss_descriptor( base, base + mem::size_of::<TaskStateSegment>() as u64 - 1, true, ) .present() .dpl(Ring::Ring0) .finish(); GDT[GDT_FIRST_TSS..GDT_FIRST_TSS + TSS_ENTRIES] .copy_from_slice(&mem::transmute::<Descriptor64, [Descriptor; 2]>( tss_descriptor, )); // Allocate all ISTs for this core. 
for i in 0..IST_ENTRIES { TSS.0.ist[i] = &IST[i * STACK_SIZE] as *const _ as u64 + STACK_SIZE as u64 - 0x10; } // load GDT let gdtr = DescriptorTablePointer::new(&GDT); dtables::lgdt(&gdtr); // Reload the segment descriptors load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, Ring::Ring0)); load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, Ring::Ring0)); load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, Ring::Ring0)); load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, Ring::Ring0)); } } #[inline(always)] unsafe fn set_kernel_stack(stack: usize) { TSS.0.rsp[0] = stack as u64; } #[no_mangle] pub unsafe extern "C" fn set_current_kernel_stack() { let root = scheduler::get_root_page_table() as u64; if root != cr3() { cr3_write(root); } let rsp = scheduler::get_current_stack(); set_kernel_stack(rsp + STACK_SIZE - 0x10); }
29.808642
118
0.700145
6736b78278c84c71c6d0c836a58424b3fea0afad
1,547
//! Endpoint for serving GraphiQL source. use finchers::endpoint::{ApplyContext, ApplyResult, Endpoint}; use finchers::error::Error; use futures::{Future, Poll}; use bytes::Bytes; use http::{header, Response}; use juniper; /// Creates an endpoint which returns a generated GraphiQL interface. pub fn graphiql_source(endpoint_url: impl AsRef<str>) -> GraphiQLSource { GraphiQLSource { source: juniper::http::graphiql::graphiql_source(endpoint_url.as_ref()).into(), } } #[allow(missing_docs)] #[derive(Debug)] pub struct GraphiQLSource { source: Bytes, } impl GraphiQLSource { /// Regenerate the GraphiQL interface with the specified endpoint URL. pub fn regenerate(&mut self, endpoint_url: impl AsRef<str>) { self.source = juniper::http::graphiql::graphiql_source(endpoint_url.as_ref()).into(); } } impl<'a> Endpoint<'a> for GraphiQLSource { type Output = (Response<Bytes>,); type Future = GraphiQLFuture<'a>; fn apply(&'a self, _: &mut ApplyContext<'_>) -> ApplyResult<Self::Future> { Ok(GraphiQLFuture(&self.source)) } } #[doc(hidden)] #[derive(Debug)] pub struct GraphiQLFuture<'a>(&'a Bytes); impl<'a> Future for GraphiQLFuture<'a> { type Item = (Response<Bytes>,); type Error = Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { Ok((Response::builder() .header(header::CONTENT_TYPE, "text/html; charset=utf-8") .body(self.0.clone()) .expect("should be a valid response"),) .into()) } }
27.140351
93
0.656109
f8cec83dc31cc797dea516074614621df2da2894
5,011
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use dot; use rustc::mir::repr::*; use std::borrow::IntoCow; #[derive(Copy, Clone, PartialEq, Eq)] pub struct EdgeIndex { source: BasicBlock, target: BasicBlock, index: usize, } impl<'a,'tcx> dot::Labeller<'a, BasicBlock, EdgeIndex> for Mir<'tcx> { fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("Mir").unwrap() } fn node_id(&'a self, n: &BasicBlock) -> dot::Id<'a> { dot::Id::new(format!("BB{}", n.index())).unwrap() } fn node_shape(&'a self, _: &BasicBlock) -> Option<dot::LabelText<'a>> { Some(dot::LabelText::label("none")) } fn node_label(&'a self, &n: &BasicBlock) -> dot::LabelText<'a> { let mut buffer = String::new(); buffer.push_str("<TABLE ALIGN=\"LEFT\">"); buffer.push_str("<TR><TD>"); buffer.push_str(&format!("{:?}", n)); buffer.push_str("</TD></TR>"); let data = self.basic_block_data(n); for statement in &data.statements { buffer.push_str("<TR><TD>"); buffer.push_str(&escape(format!("{:?}", statement))); buffer.push_str("</TD></TR>"); } buffer.push_str("<TR><TD>"); buffer.push_str(&escape(format!("{:?}", &data.terminator))); buffer.push_str("</TD></TR>"); buffer.push_str("</TABLE>"); dot::LabelText::html(buffer) } fn edge_label(&'a self, edge: &EdgeIndex) -> dot::LabelText<'a> { dot::LabelText::label(format!("{}", edge.index)) } } impl<'a,'tcx> dot::GraphWalk<'a, BasicBlock, EdgeIndex> for Mir<'tcx> { fn nodes(&'a self) -> dot::Nodes<'a, BasicBlock> { self.all_basic_blocks().into_cow() } fn edges(&'a self) -> dot::Edges<'a, EdgeIndex> { self.all_basic_blocks() .into_iter() .flat_map(|source| { 
self.basic_block_data(source) .terminator .successors() .iter() .enumerate() .map(move |(index, &target)| { EdgeIndex { source: source, target: target, index: index, } }) }) .collect::<Vec<_>>() .into_cow() } fn source(&'a self, edge: &EdgeIndex) -> BasicBlock { edge.source } fn target(&'a self, edge: &EdgeIndex) -> BasicBlock { edge.target } } fn escape(text: String) -> String { let text = dot::escape_html(&text); let text = all_to_subscript("Temp", text); let text = all_to_subscript("Var", text); let text = all_to_subscript("Arg", text); let text = all_to_subscript("BB", text); text } /// A call like `all_to_subscript("Temp", "Temp(123)")` will convert /// to `Temp₁₂₃`. fn all_to_subscript(header: &str, mut text: String) -> String { let mut offset = 0; while offset < text.len() { if let Some(text1) = to_subscript1(header, &text, &mut offset) { text = text1; } } return text; /// Looks for `Foo(\d*)` where `header=="Foo"` and replaces the `\d` with subscripts. /// Updates `offset` to point to the next location where we might want to search. /// Returns an updated string if changes were made, else None. fn to_subscript1(header: &str, text: &str, offset: &mut usize) -> Option<String> { let a = match text[*offset..].find(header) { None => { *offset = text.len(); return None; } Some(a) => a + *offset, }; // Example: // // header: "Foo" // text: ....Foo(123)... // ^ ^ // a b let b = a + header.len(); *offset = b; let mut chars = text[b..].chars(); if Some('(') != chars.next() { return None; } let mut result = String::new(); result.push_str(&text[..b]); while let Some(c) = chars.next() { if c == ')' { break; } if !c.is_digit(10) { return None; } // 0x208 is _0 in unicode, 0x209 is _1, etc const SUBSCRIPTS: &'static str = "₀₁₂₃₄₅₆₇₈₉"; let n = (c as usize) - ('0' as usize); result.extend(SUBSCRIPTS.chars().skip(n).take(1)); } result.extend(chars); return Some(result); } }
30.005988
89
0.512872
f7d79d49b9fe7ed1372e9a8c1a961eb76ad4ead9
3,507
use crate::builtin::env::RispEnv; use crate::ir::llvm_type::int32_type; use crate::ir::operate::{build_alloca, build_int32_value, build_load, build_store, call_function}; use crate::ir::string::codegen_string; use crate::{LLVMValueWrapper, RispCallback, RispErr, RispExp}; use llvm_sys::LLVMValue; use std::io; use std::ptr::null_mut; #[allow(dead_code)] pub fn wrap_llvm_value(value: f64, llvm_ref: LLVMValueWrapper) -> RispExp { RispExp::Number(value, llvm_ref) } #[allow(dead_code)] pub fn unwrap_object(exp: &RispExp) -> *mut LLVMValue { match *exp { RispExp::Number(_f, (llvm_ref, _)) => llvm_ref, _ => panic!("failed to unwrap object: {}", exp), } } pub fn load_llvm_value(env: &RispEnv, exp: &RispExp) -> *mut LLVMValue { match exp { RispExp::Number(_, value_ref) => { if !value_ref.1 { build_load(env.llvm_builder, value_ref.0, "") } else { value_ref.0 } } _ => 0 as *mut LLVMValue } } pub fn eval_number(env: &RispEnv, f: f64) -> RispExp { let llvm_input = build_alloca(env.llvm_builder, int32_type(), ""); build_store(env.llvm_builder, build_int32_value(f), llvm_input); // let llvm_input = build_load(env.llvm_builder, llvm_input, ""); RispExp::Number(f, (llvm_input, false)) } // printf keyword pub fn eval_printf_fn( env: &mut RispEnv, func: &RispCallback, args_eval: Result<Vec<RispExp>, RispErr>, ) -> Result<RispExp, RispErr> { let args_eval = args_eval?; let first_arg = args_eval.first(); let mut llvm_val: *mut LLVMValue = null_mut(); // emit IR if let Some(RispExp::Number(_, llvm_ref)) = first_arg { let llvm_ref = *llvm_ref; let printf = env.built_ins["printf"]; let llvm_value = build_alloca(env.llvm_builder, int32_type(), ""); build_store(env.llvm_builder, llvm_ref.0, llvm_value); let print_int = build_load(env.llvm_builder, llvm_value, ""); let printf_args = vec![codegen_string(env, "Result: %d\n", ""), print_int]; call_function(env.llvm_builder, printf, printf_args, ""); llvm_val = llvm_ref.0; } // eval print func(env, &args_eval, Some(llvm_val)) } // input keyword 
pub fn eval_input_fn(env: &mut RispEnv, func: &RispCallback) -> Result<RispExp, RispErr> { let input_fn = env.built_ins["input"]; let llvm_input = build_alloca(env.llvm_builder, int32_type(), "input"); let input_args = vec![codegen_string(env, "%u", ""), llvm_input]; // emit IR call_function(env.llvm_builder, input_fn, input_args, ""); // eval func(env, &[], Some(llvm_input)) } // arithmetic pub fn eval_arithmetic( env: &mut RispEnv, _op: &str, func: &RispCallback, args_eval: Result<Vec<RispExp>, RispErr>, ) -> Result<RispExp, RispErr> { let risp_args = args_eval.unwrap(); func(env, &risp_args, None) } // utils fn parse_single_float(exp: &RispExp) -> Result<f64, RispErr> { match exp { RispExp::Number(num, _) => Ok(*num), _ => Err(RispErr::Reason("expected a number".to_string())), } } pub fn get_input(prompt: &str) -> String { println!("{}", prompt); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(_) => {} Err(_) => {} } input.trim().to_string() } pub fn parse_list_of_floats(args: &[RispExp]) -> Result<Vec<f64>, RispErr> { args.iter().map(|x| parse_single_float(x)).collect() }
29.470588
98
0.631594
694ac118fd1670ffdfb8b8c81c49925e00d883ab
30,001
extern crate wgpu_hal as hal; use hal::{ Adapter as _, CommandEncoder as _, Device as _, Instance as _, Queue as _, Surface as _, }; use std::{borrow::Borrow, iter, mem, num::NonZeroU32, ptr, time::Instant}; const MAX_BUNNIES: usize = 1 << 20; const BUNNY_SIZE: f32 = 0.15 * 256.0; const GRAVITY: f32 = -9.8 * 100.0; const MAX_VELOCITY: f32 = 750.0; const COMMAND_BUFFER_PER_CONTEXT: usize = 100; #[repr(C)] #[derive(Clone, Copy)] struct Globals { mvp: [[f32; 4]; 4], size: [f32; 2], pad: [f32; 2], } #[repr(C, align(256))] #[derive(Clone, Copy)] struct Locals { position: [f32; 2], velocity: [f32; 2], color: u32, _pad: u32, } struct ExecutionContext<A: hal::Api> { encoder: A::CommandEncoder, fence: A::Fence, fence_value: hal::FenceValue, used_views: Vec<A::TextureView>, used_cmd_bufs: Vec<A::CommandBuffer>, frames_recorded: usize, } impl<A: hal::Api> ExecutionContext<A> { unsafe fn wait_and_clear(&mut self, device: &A::Device) { device.wait(&self.fence, self.fence_value, !0).unwrap(); self.encoder.reset_all(self.used_cmd_bufs.drain(..)); for view in self.used_views.drain(..) 
{ device.destroy_texture_view(view); } self.frames_recorded = 0; } } #[allow(dead_code)] struct Example<A: hal::Api> { instance: A::Instance, adapter: A::Adapter, surface: A::Surface, surface_format: wgt::TextureFormat, device: A::Device, queue: A::Queue, global_group: A::BindGroup, local_group: A::BindGroup, global_group_layout: A::BindGroupLayout, local_group_layout: A::BindGroupLayout, pipeline_layout: A::PipelineLayout, shader: A::ShaderModule, pipeline: A::RenderPipeline, bunnies: Vec<Locals>, local_buffer: A::Buffer, local_alignment: wgt::BufferAddress, global_buffer: A::Buffer, sampler: A::Sampler, texture: A::Texture, texture_view: A::TextureView, contexts: Vec<ExecutionContext<A>>, context_index: usize, extent: [u32; 2], start: Instant, } impl<A: hal::Api> Example<A> { fn init(window: &winit::window::Window) -> Result<Self, hal::InstanceError> { let instance_desc = hal::InstanceDescriptor { name: "example", flags: if cfg!(debug_assertions) { hal::InstanceFlags::all() } else { hal::InstanceFlags::empty() }, }; let instance = unsafe { A::Instance::init(&instance_desc)? 
}; let mut surface = unsafe { instance.create_surface(window).unwrap() }; let (adapter, capabilities) = unsafe { let mut adapters = instance.enumerate_adapters(); if adapters.is_empty() { return Err(hal::InstanceError); } let exposed = adapters.swap_remove(0); println!( "Surface caps: {:?}", exposed.adapter.surface_capabilities(&surface) ); (exposed.adapter, exposed.capabilities) }; let hal::OpenDevice { device, mut queue } = unsafe { adapter.open(wgt::Features::empty()).unwrap() }; let window_size: (u32, u32) = window.inner_size().into(); let surface_config = hal::SurfaceConfiguration { swap_chain_size: 3, present_mode: wgt::PresentMode::Fifo, composite_alpha_mode: hal::CompositeAlphaMode::Opaque, format: wgt::TextureFormat::Bgra8UnormSrgb, extent: wgt::Extent3d { width: window_size.0, height: window_size.1, depth_or_array_layers: 1, }, usage: hal::TextureUses::COLOR_TARGET, }; unsafe { surface.configure(&device, &surface_config).unwrap(); }; let naga_shader = { let shader_file = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("examples") .join("halmark") .join("shader.wgsl"); let source = std::fs::read_to_string(shader_file).unwrap(); let module = naga::front::wgsl::Parser::new().parse(&source).unwrap(); let info = naga::valid::Validator::new( naga::valid::ValidationFlags::all(), naga::valid::Capabilities::empty(), ) .validate(&module) .unwrap(); hal::NagaShader { module, info } }; let shader_desc = hal::ShaderModuleDescriptor { label: None }; let shader = unsafe { device .create_shader_module(&shader_desc, hal::ShaderInput::Naga(naga_shader)) .unwrap() }; let global_bgl_desc = hal::BindGroupLayoutDescriptor { label: None, entries: &[ wgt::BindGroupLayoutEntry { binding: 0, visibility: wgt::ShaderStages::VERTEX, ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: false, min_binding_size: wgt::BufferSize::new(mem::size_of::<Globals>() as _), }, count: None, }, wgt::BindGroupLayoutEntry { binding: 1, visibility: 
wgt::ShaderStages::FRAGMENT, ty: wgt::BindingType::Texture { sample_type: wgt::TextureSampleType::Float { filterable: true }, view_dimension: wgt::TextureViewDimension::D2, multisampled: false, }, count: None, }, wgt::BindGroupLayoutEntry { binding: 2, visibility: wgt::ShaderStages::FRAGMENT, ty: wgt::BindingType::Sampler { filtering: true, comparison: false, }, count: None, }, ], }; let global_group_layout = unsafe { device.create_bind_group_layout(&global_bgl_desc).unwrap() }; let local_bgl_desc = hal::BindGroupLayoutDescriptor { entries: &[wgt::BindGroupLayoutEntry { binding: 0, visibility: wgt::ShaderStages::VERTEX, ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: true, min_binding_size: wgt::BufferSize::new(mem::size_of::<Locals>() as _), }, count: None, }], label: None, }; let local_group_layout = unsafe { device.create_bind_group_layout(&local_bgl_desc).unwrap() }; let pipeline_layout_desc = hal::PipelineLayoutDescriptor { label: None, flags: hal::PipelineLayoutFlags::empty(), bind_group_layouts: &[&global_group_layout, &local_group_layout], push_constant_ranges: &[], }; let pipeline_layout = unsafe { device .create_pipeline_layout(&pipeline_layout_desc) .unwrap() }; let pipeline_desc = hal::RenderPipelineDescriptor { label: None, layout: &pipeline_layout, vertex_stage: hal::ProgrammableStage { module: &shader, entry_point: "vs_main", }, vertex_buffers: &[], fragment_stage: Some(hal::ProgrammableStage { module: &shader, entry_point: "fs_main", }), primitive: wgt::PrimitiveState { topology: wgt::PrimitiveTopology::TriangleStrip, ..wgt::PrimitiveState::default() }, depth_stencil: None, multisample: wgt::MultisampleState::default(), color_targets: &[wgt::ColorTargetState { format: surface_config.format, blend: Some(wgt::BlendState::ALPHA_BLENDING), write_mask: wgt::ColorWrites::default(), }], }; let pipeline = unsafe { device.create_render_pipeline(&pipeline_desc).unwrap() }; let texture_data = vec![0xFFu8; 4]; let 
staging_buffer_desc = hal::BufferDescriptor { label: Some("stage"), size: texture_data.len() as wgt::BufferAddress, usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC, memory_flags: hal::MemoryFlags::TRANSIENT | hal::MemoryFlags::PREFER_COHERENT, }; let staging_buffer = unsafe { device.create_buffer(&staging_buffer_desc).unwrap() }; unsafe { let mapping = device .map_buffer(&staging_buffer, 0..staging_buffer_desc.size) .unwrap(); ptr::copy_nonoverlapping( texture_data.as_ptr(), mapping.ptr.as_ptr(), texture_data.len(), ); device.unmap_buffer(&staging_buffer).unwrap(); assert!(mapping.is_coherent); } let texture_desc = hal::TextureDescriptor { label: None, size: wgt::Extent3d { width: 1, height: 1, depth_or_array_layers: 1, }, mip_level_count: 1, sample_count: 1, dimension: wgt::TextureDimension::D2, format: wgt::TextureFormat::Rgba8UnormSrgb, usage: hal::TextureUses::COPY_DST | hal::TextureUses::RESOURCE, memory_flags: hal::MemoryFlags::empty(), }; let texture = unsafe { device.create_texture(&texture_desc).unwrap() }; let cmd_encoder_desc = hal::CommandEncoderDescriptor { label: None, queue: &queue, }; let mut cmd_encoder = unsafe { device.create_command_encoder(&cmd_encoder_desc).unwrap() }; unsafe { cmd_encoder.begin_encoding(Some("init")).unwrap() }; { let buffer_barrier = hal::BufferBarrier { buffer: &staging_buffer, usage: hal::BufferUses::empty()..hal::BufferUses::COPY_SRC, }; let texture_barrier1 = hal::TextureBarrier { texture: &texture, range: wgt::ImageSubresourceRange::default(), usage: hal::TextureUses::UNINITIALIZED..hal::TextureUses::COPY_DST, }; let texture_barrier2 = hal::TextureBarrier { texture: &texture, range: wgt::ImageSubresourceRange::default(), usage: hal::TextureUses::COPY_DST..hal::TextureUses::RESOURCE, }; let copy = hal::BufferTextureCopy { buffer_layout: wgt::ImageDataLayout { offset: 0, bytes_per_row: NonZeroU32::new(4), rows_per_image: None, }, texture_base: hal::TextureCopyBase { origin: wgt::Origin3d::ZERO, mip_level: 0, 
array_layer: 0, aspect: hal::FormatAspects::COLOR, }, size: hal::CopyExtent { width: 1, height: 1, depth: 1, }, }; unsafe { cmd_encoder.transition_buffers(iter::once(buffer_barrier)); cmd_encoder.transition_textures(iter::once(texture_barrier1)); cmd_encoder.copy_buffer_to_texture(&staging_buffer, &texture, iter::once(copy)); cmd_encoder.transition_textures(iter::once(texture_barrier2)); } } let sampler_desc = hal::SamplerDescriptor { label: None, address_modes: [wgt::AddressMode::ClampToEdge; 3], mag_filter: wgt::FilterMode::Linear, min_filter: wgt::FilterMode::Nearest, mipmap_filter: wgt::FilterMode::Nearest, lod_clamp: None, compare: None, anisotropy_clamp: None, border_color: None, }; let sampler = unsafe { device.create_sampler(&sampler_desc).unwrap() }; let globals = Globals { // cgmath::ortho() projection mvp: [ [2.0 / window_size.0 as f32, 0.0, 0.0, 0.0], [0.0, 2.0 / window_size.1 as f32, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [-1.0, -1.0, 0.0, 1.0], ], size: [BUNNY_SIZE; 2], pad: [0.0; 2], }; let global_buffer_desc = hal::BufferDescriptor { label: Some("global"), size: mem::size_of::<Globals>() as wgt::BufferAddress, usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::UNIFORM, memory_flags: hal::MemoryFlags::PREFER_COHERENT, }; let global_buffer = unsafe { let buffer = device.create_buffer(&global_buffer_desc).unwrap(); let mapping = device .map_buffer(&buffer, 0..global_buffer_desc.size) .unwrap(); ptr::copy_nonoverlapping( &globals as *const Globals as *const u8, mapping.ptr.as_ptr(), mem::size_of::<Globals>(), ); device.unmap_buffer(&buffer).unwrap(); assert!(mapping.is_coherent); buffer }; fn align_to( value: wgt::BufferAddress, alignment: wgt::BufferAddress, ) -> wgt::BufferAddress { match value % alignment { 0 => value, other => value - other + alignment, } } let local_alignment = align_to( mem::size_of::<Locals>() as _, capabilities.limits.min_uniform_buffer_offset_alignment as _, ); let local_buffer_desc = hal::BufferDescriptor { label: Some("local"), 
size: (MAX_BUNNIES as wgt::BufferAddress) * local_alignment, usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::UNIFORM, memory_flags: hal::MemoryFlags::PREFER_COHERENT, }; let local_buffer = unsafe { device.create_buffer(&local_buffer_desc).unwrap() }; let view_desc = hal::TextureViewDescriptor { label: None, format: texture_desc.format, dimension: wgt::TextureViewDimension::D2, usage: hal::TextureUses::RESOURCE, range: wgt::ImageSubresourceRange::default(), }; let texture_view = unsafe { device.create_texture_view(&texture, &view_desc).unwrap() }; let global_group = { let global_buffer_binding = hal::BufferBinding { buffer: &global_buffer, offset: 0, size: None, }; let texture_binding = hal::TextureBinding { view: &texture_view, usage: hal::TextureUses::RESOURCE, }; let global_group_desc = hal::BindGroupDescriptor { label: Some("global"), layout: &global_group_layout, buffers: &[global_buffer_binding], samplers: &[&sampler], textures: &[texture_binding], entries: &[ hal::BindGroupEntry { binding: 0, resource_index: 0, }, hal::BindGroupEntry { binding: 1, resource_index: 0, }, hal::BindGroupEntry { binding: 2, resource_index: 0, }, ], }; unsafe { device.create_bind_group(&global_group_desc).unwrap() } }; let local_group = { let local_buffer_binding = hal::BufferBinding { buffer: &local_buffer, offset: 0, size: wgt::BufferSize::new(mem::size_of::<Locals>() as _), }; let local_group_desc = hal::BindGroupDescriptor { label: Some("local"), layout: &local_group_layout, buffers: &[local_buffer_binding], samplers: &[], textures: &[], entries: &[hal::BindGroupEntry { binding: 0, resource_index: 0, }], }; unsafe { device.create_bind_group(&local_group_desc).unwrap() } }; let init_fence_value = 1; let fence = unsafe { let mut fence = device.create_fence().unwrap(); let init_cmd = cmd_encoder.end_encoding().unwrap(); queue .submit(&[&init_cmd], Some((&mut fence, init_fence_value))) .unwrap(); device.wait(&fence, init_fence_value, !0).unwrap(); 
device.destroy_buffer(staging_buffer); cmd_encoder.reset_all(iter::once(init_cmd)); fence }; Ok(Example { instance, surface, surface_format: surface_config.format, adapter, device, queue, pipeline_layout, shader, pipeline, global_group, local_group, global_group_layout, local_group_layout, bunnies: Vec::new(), local_buffer, local_alignment, global_buffer, sampler, texture, texture_view, contexts: vec![ExecutionContext { encoder: cmd_encoder, fence, fence_value: init_fence_value + 1, used_views: Vec::new(), used_cmd_bufs: Vec::new(), frames_recorded: 0, }], context_index: 0, extent: [window_size.0, window_size.1], start: Instant::now(), }) } fn is_empty(&self) -> bool { self.bunnies.is_empty() } fn exit(mut self) { unsafe { { let ctx = &mut self.contexts[self.context_index]; self.queue .submit(&[], Some((&mut ctx.fence, ctx.fence_value))) .unwrap(); } for mut ctx in self.contexts { ctx.wait_and_clear(&self.device); self.device.destroy_command_encoder(ctx.encoder); self.device.destroy_fence(ctx.fence); } self.device.destroy_bind_group(self.local_group); self.device.destroy_bind_group(self.global_group); self.device.destroy_buffer(self.local_buffer); self.device.destroy_buffer(self.global_buffer); self.device.destroy_texture_view(self.texture_view); self.device.destroy_texture(self.texture); self.device.destroy_sampler(self.sampler); self.device.destroy_shader_module(self.shader); self.device.destroy_render_pipeline(self.pipeline); self.device .destroy_bind_group_layout(self.local_group_layout); self.device .destroy_bind_group_layout(self.global_group_layout); self.device.destroy_pipeline_layout(self.pipeline_layout); self.surface.unconfigure(&self.device); self.device.exit(self.queue); self.instance.destroy_surface(self.surface); drop(self.adapter); } } fn update(&mut self, event: winit::event::WindowEvent) { if let winit::event::WindowEvent::KeyboardInput { input: winit::event::KeyboardInput { virtual_keycode: Some(winit::event::VirtualKeyCode::Space), state: 
winit::event::ElementState::Pressed, .. }, .. } = event { let spawn_count = 64 + self.bunnies.len() / 2; let elapsed = self.start.elapsed(); let color = elapsed.as_nanos() as u32; println!( "Spawning {} bunnies, total at {}", spawn_count, self.bunnies.len() + spawn_count ); for i in 0..spawn_count { let random = ((elapsed.as_nanos() * (i + 1) as u128) & 0xFF) as f32 / 255.0; let speed = random * MAX_VELOCITY - (MAX_VELOCITY * 0.5); self.bunnies.push(Locals { position: [0.0, 0.5 * (self.extent[1] as f32)], velocity: [speed, 0.0], color, _pad: 0, }); } } } fn render(&mut self) { let delta = 0.01; for bunny in self.bunnies.iter_mut() { bunny.position[0] += bunny.velocity[0] * delta; bunny.position[1] += bunny.velocity[1] * delta; bunny.velocity[1] += GRAVITY * delta; if (bunny.velocity[0] > 0.0 && bunny.position[0] + 0.5 * BUNNY_SIZE > self.extent[0] as f32) || (bunny.velocity[0] < 0.0 && bunny.position[0] - 0.5 * BUNNY_SIZE < 0.0) { bunny.velocity[0] *= -1.0; } if bunny.velocity[1] < 0.0 && bunny.position[1] < 0.5 * BUNNY_SIZE { bunny.velocity[1] *= -1.0; } } if !self.bunnies.is_empty() { let size = self.bunnies.len() * self.local_alignment as usize; unsafe { let mapping = self .device .map_buffer(&self.local_buffer, 0..size as wgt::BufferAddress) .unwrap(); ptr::copy_nonoverlapping( self.bunnies.as_ptr() as *const u8, mapping.ptr.as_ptr(), size, ); assert!(mapping.is_coherent); self.device.unmap_buffer(&self.local_buffer).unwrap(); } } let ctx = &mut self.contexts[self.context_index]; let surface_tex = unsafe { self.surface.acquire_texture(!0).unwrap().unwrap().texture }; let target_barrier0 = hal::TextureBarrier { texture: surface_tex.borrow(), range: wgt::ImageSubresourceRange::default(), usage: hal::TextureUses::UNINITIALIZED..hal::TextureUses::COLOR_TARGET, }; unsafe { ctx.encoder.begin_encoding(Some("frame")).unwrap(); ctx.encoder.transition_textures(iter::once(target_barrier0)); } let surface_view_desc = hal::TextureViewDescriptor { label: None, format: 
self.surface_format, dimension: wgt::TextureViewDimension::D2, usage: hal::TextureUses::COLOR_TARGET, range: wgt::ImageSubresourceRange::default(), }; let surface_tex_view = unsafe { self.device .create_texture_view(surface_tex.borrow(), &surface_view_desc) .unwrap() }; let pass_desc = hal::RenderPassDescriptor { label: None, extent: wgt::Extent3d { width: self.extent[0], height: self.extent[1], depth_or_array_layers: 1, }, sample_count: 1, color_attachments: &[hal::ColorAttachment { target: hal::Attachment { view: &surface_tex_view, usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: None, ops: hal::AttachmentOps::STORE, clear_value: wgt::Color { r: 0.1, g: 0.2, b: 0.3, a: 1.0, }, }], depth_stencil_attachment: None, }; unsafe { ctx.encoder.begin_render_pass(&pass_desc); ctx.encoder.set_render_pipeline(&self.pipeline); ctx.encoder .set_bind_group(&self.pipeline_layout, 0, &self.global_group, &[]); } for i in 0..self.bunnies.len() { let offset = (i as wgt::DynamicOffset) * (self.local_alignment as wgt::DynamicOffset); unsafe { ctx.encoder .set_bind_group(&self.pipeline_layout, 1, &self.local_group, &[offset]); ctx.encoder.draw(0, 4, 0, 1); } } ctx.frames_recorded += 1; let do_fence = ctx.frames_recorded > COMMAND_BUFFER_PER_CONTEXT; let target_barrier1 = hal::TextureBarrier { texture: surface_tex.borrow(), range: wgt::ImageSubresourceRange::default(), usage: hal::TextureUses::COLOR_TARGET..hal::TextureUses::empty(), }; unsafe { ctx.encoder.end_render_pass(); ctx.encoder.transition_textures(iter::once(target_barrier1)); } unsafe { let cmd_buf = ctx.encoder.end_encoding().unwrap(); let fence_param = if do_fence { Some((&mut ctx.fence, ctx.fence_value)) } else { None }; self.queue.submit(&[&cmd_buf], fence_param).unwrap(); self.queue.present(&mut self.surface, surface_tex).unwrap(); ctx.used_cmd_bufs.push(cmd_buf); ctx.used_views.push(surface_tex_view); }; if do_fence { log::info!("Context switch from {}", self.context_index); let old_fence_value = 
ctx.fence_value; if self.contexts.len() == 1 { let hal_desc = hal::CommandEncoderDescriptor { label: None, queue: &self.queue, }; self.contexts.push(unsafe { ExecutionContext { encoder: self.device.create_command_encoder(&hal_desc).unwrap(), fence: self.device.create_fence().unwrap(), fence_value: 0, used_views: Vec::new(), used_cmd_bufs: Vec::new(), frames_recorded: 0, } }); } self.context_index = (self.context_index + 1) % self.contexts.len(); let next = &mut self.contexts[self.context_index]; unsafe { next.wait_and_clear(&self.device); } next.fence_value = old_fence_value + 1; } } } #[cfg(all(feature = "metal"))] type Api = hal::api::Metal; #[cfg(all(feature = "vulkan", not(feature = "metal")))] type Api = hal::api::Vulkan; #[cfg(all(feature = "gles", not(feature = "metal"), not(feature = "vulkan")))] type Api = hal::api::Gles; #[cfg(all( feature = "dx12", not(feature = "metal"), not(feature = "vulkan"), not(feature = "gles") ))] type Api = hal::api::Dx12; #[cfg(not(any( feature = "metal", feature = "vulkan", feature = "gles", feature = "dx12" )))] type Api = hal::api::Empty; fn main() { env_logger::init(); let event_loop = winit::event_loop::EventLoop::new(); let window = winit::window::WindowBuilder::new() .with_title("hal-bunnymark") .build(&event_loop) .unwrap(); let example_result = Example::<Api>::init(&window); let mut example = Some(example_result.expect("Selected backend is not supported")); let mut last_frame_inst = Instant::now(); let (mut frame_count, mut accum_time) = (0, 0.0); event_loop.run(move |event, _, control_flow| { let _ = &window; // force ownership by the closure *control_flow = winit::event_loop::ControlFlow::Poll; match event { winit::event::Event::RedrawEventsCleared => { window.request_redraw(); } winit::event::Event::WindowEvent { event, .. 
} => match event { winit::event::WindowEvent::KeyboardInput { input: winit::event::KeyboardInput { virtual_keycode: Some(winit::event::VirtualKeyCode::Escape), state: winit::event::ElementState::Pressed, .. }, .. } | winit::event::WindowEvent::CloseRequested => { *control_flow = winit::event_loop::ControlFlow::Exit; } _ => { example.as_mut().unwrap().update(event); } }, winit::event::Event::RedrawRequested(_) => { let ex = example.as_mut().unwrap(); { accum_time += last_frame_inst.elapsed().as_secs_f32(); last_frame_inst = Instant::now(); frame_count += 1; if frame_count == 100 && !ex.is_empty() { println!( "Avg frame time {}ms", accum_time * 1000.0 / frame_count as f32 ); accum_time = 0.0; frame_count = 0; } } ex.render(); } winit::event::Event::LoopDestroyed => { example.take().unwrap().exit(); } _ => {} } }); }
36.856265
99
0.509383
dbd6d75173f5c4a4e87c9141952cb35bcb1f9ecd
3,006
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use bytes::BufMut; use super::String2StringFunction; use super::StringOperator; #[derive(Clone, Default)] pub struct Soundex { buf: String, } impl Soundex { #[inline(always)] fn number_map(i: char) -> Option<u8> { match i.to_ascii_lowercase() { 'b' | 'f' | 'p' | 'v' => Some(b'1'), 'c' | 'g' | 'j' | 'k' | 'q' | 's' | 'x' | 'z' => Some(b'2'), 'd' | 't' => Some(b'3'), 'l' => Some(b'4'), 'm' | 'n' => Some(b'5'), 'r' => Some(b'6'), _ => Some(b'0'), } } #[inline(always)] fn is_drop(c: char) -> bool { matches!( c.to_ascii_lowercase(), 'a' | 'e' | 'i' | 'o' | 'u' | 'y' | 'h' | 'w' ) } // https://github.com/mysql/mysql-server/blob/3290a66c89eb1625a7058e0ef732432b6952b435/sql/item_strfunc.cc#L1919 #[inline(always)] fn is_uni_alphabetic(c: char) -> bool { ('a'..='z').contains(&c) || ('A'..='Z').contains(&c) || c as i32 >= 0xC0 } } impl StringOperator for Soundex { #[inline] fn apply_with_no_null<'a>(&'a mut self, data: &'a [u8], mut buffer: &mut [u8]) -> usize { let mut last = None; let mut count = 0; self.buf.clear(); for ch in String::from_utf8_lossy(data).chars() { let score = Self::number_map(ch); if last.is_none() { if !Self::is_uni_alphabetic(ch) { continue; } last = score; self.buf.push(ch.to_ascii_uppercase()); } else { if !ch.is_ascii_alphabetic() || Self::is_drop(ch) || score.is_none() || score == last { continue; } last = score; self.buf.push(score.unwrap() as char); } count += 1; } // add 
'0' if !self.buf.is_empty() && count < 4 { self.buf.extend(vec!['0'; 4 - count]) } let bytes = self.buf.as_bytes(); buffer.put_slice(bytes); bytes.len() } fn estimate_bytes(&self, array: &common_datavalues::prelude::DFStringArray) -> usize { usize::max(array.inner().values().len(), 4 * array.len()) } } pub type SoundexFunction = String2StringFunction<Soundex>;
29.470588
116
0.516966
bf004313e705e3d99829992be935b6381789c3d4
18,971
// Copyright 2020 The Matrix.org Foundation C.I.C. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod pk_signing; use serde::{Deserialize, Serialize}; use serde_json::Error as JsonError; use std::{ collections::BTreeMap, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; use matrix_sdk_common::{ api::r0::keys::{upload_signatures::Request as SignatureUploadRequest, KeyUsage}, encryption::DeviceKeys, identifiers::{DeviceKeyAlgorithm, DeviceKeyId, UserId}, locks::Mutex, }; use crate::{ error::SignatureError, requests::UploadSigningKeysRequest, OwnUserIdentity, ReadOnlyAccount, ReadOnlyDevice, UserIdentity, }; use pk_signing::{MasterSigning, PickledSignings, SelfSigning, Signing, SigningError, UserSigning}; /// Private cross signing identity. /// /// This object holds the private and public ed25519 key triplet that is used /// for cross signing. /// /// The object might be comletely empty or have only some of the key pairs /// available. /// /// It can be used to sign devices or other identities. #[derive(Clone, Debug)] pub struct PrivateCrossSigningIdentity { user_id: Arc<UserId>, shared: Arc<AtomicBool>, pub(crate) master_key: Arc<Mutex<Option<MasterSigning>>>, pub(crate) user_signing_key: Arc<Mutex<Option<UserSigning>>>, pub(crate) self_signing_key: Arc<Mutex<Option<SelfSigning>>>, } /// The pickled version of a `PrivateCrossSigningIdentity`. /// /// Can be used to store the identity. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct PickledCrossSigningIdentity { /// The user id of the identity owner. pub user_id: UserId, /// Have the public keys of the identity been shared. pub shared: bool, /// The encrypted pickle of the identity. pub pickle: String, } impl PrivateCrossSigningIdentity { /// Get the user id that this identity belongs to. pub fn user_id(&self) -> &UserId { &self.user_id } /// Is the identity empty. /// /// An empty identity doesn't contain any private keys. /// /// It is usual for the identity not to contain the master key since the /// master key is only needed to sign the subkeys. /// /// An empty identity indicates that either no identity was created for this /// use or that another device created it and hasn't shared it yet with us. pub async fn is_empty(&self) -> bool { let has_master = self.master_key.lock().await.is_some(); let has_user = self.user_signing_key.lock().await.is_some(); let has_self = self.self_signing_key.lock().await.is_some(); !(has_master && has_user && has_self) } /// Create a new empty identity. pub(crate) fn empty(user_id: UserId) -> Self { Self { user_id: Arc::new(user_id), shared: Arc::new(AtomicBool::new(false)), master_key: Arc::new(Mutex::new(None)), self_signing_key: Arc::new(Mutex::new(None)), user_signing_key: Arc::new(Mutex::new(None)), } } pub(crate) async fn as_public_identity(&self) -> Result<OwnUserIdentity, SignatureError> { let master = self .master_key .lock() .await .as_ref() .ok_or(SignatureError::MissingSigningKey)? .public_key .clone(); let self_signing = self .self_signing_key .lock() .await .as_ref() .ok_or(SignatureError::MissingSigningKey)? .public_key .clone(); let user_signing = self .user_signing_key .lock() .await .as_ref() .ok_or(SignatureError::MissingSigningKey)? 
.public_key .clone(); let identity = OwnUserIdentity::new(master, self_signing, user_signing)?; identity.mark_as_verified(); Ok(identity) } /// Sign the given public user identity with this private identity. pub(crate) async fn sign_user( &self, user_identity: &UserIdentity, ) -> Result<SignatureUploadRequest, SignatureError> { let signed_keys = self .user_signing_key .lock() .await .as_ref() .ok_or(SignatureError::MissingSigningKey)? .sign_user(&user_identity) .await?; Ok(SignatureUploadRequest::new(signed_keys)) } /// Sign the given device keys with this identity. pub(crate) async fn sign_device( &self, device: &ReadOnlyDevice, ) -> Result<SignatureUploadRequest, SignatureError> { let mut device_keys = device.as_device_keys(); device_keys.signatures.clear(); self.sign_device_keys(&mut device_keys).await } /// Sign an Olm account with this private identity. pub(crate) async fn sign_account( &self, account: &ReadOnlyAccount, ) -> Result<SignatureUploadRequest, SignatureError> { let mut device_keys = account.unsigned_device_keys(); self.sign_device_keys(&mut device_keys).await } pub(crate) async fn sign_device_keys( &self, mut device_keys: &mut DeviceKeys, ) -> Result<SignatureUploadRequest, SignatureError> { self.self_signing_key .lock() .await .as_ref() .ok_or(SignatureError::MissingSigningKey)? .sign_device(&mut device_keys) .await?; let mut signed_keys = BTreeMap::new(); signed_keys .entry((&*self.user_id).to_owned()) .or_insert_with(BTreeMap::new) .insert( device_keys.device_id.to_string(), serde_json::to_value(device_keys)?, ); Ok(SignatureUploadRequest::new(signed_keys)) } /// Create a new identity for the given Olm Account. /// /// Returns the new identity, the upload signing keys request and a /// signature upload request that contains the signature of the account /// signed by the self signing key. /// /// # Arguments /// /// * `account` - The Olm account that is creating the new identity. 
The /// account will sign the master key and the self signing key will sign the /// account. pub(crate) async fn new_with_account( account: &ReadOnlyAccount, ) -> (Self, UploadSigningKeysRequest, SignatureUploadRequest) { let master = Signing::new(); let mut public_key = master.cross_signing_key(account.user_id().to_owned(), KeyUsage::Master); let signature = account .sign_json( serde_json::to_value(&public_key) .expect("Can't convert own public master key to json"), ) .await; public_key .signatures .entry(account.user_id().to_owned()) .or_insert_with(BTreeMap::new) .insert( DeviceKeyId::from_parts(DeviceKeyAlgorithm::Ed25519, account.device_id()) .to_string(), signature, ); let master = MasterSigning { inner: master, public_key: public_key.into(), }; let identity = Self::new_helper(account.user_id(), master).await; let signature_request = identity .sign_account(account) .await .expect("Can't sign own device with new cross signign keys"); let request = identity.as_upload_request().await; (identity, request, signature_request) } async fn new_helper(user_id: &UserId, master: MasterSigning) -> Self { let user = Signing::new(); let mut public_key = user.cross_signing_key(user_id.to_owned(), KeyUsage::UserSigning); master.sign_subkey(&mut public_key).await; let user = UserSigning { inner: user, public_key: public_key.into(), }; let self_signing = Signing::new(); let mut public_key = self_signing.cross_signing_key(user_id.to_owned(), KeyUsage::SelfSigning); master.sign_subkey(&mut public_key).await; let self_signing = SelfSigning { inner: self_signing, public_key: public_key.into(), }; Self { user_id: Arc::new(user_id.to_owned()), shared: Arc::new(AtomicBool::new(false)), master_key: Arc::new(Mutex::new(Some(master))), self_signing_key: Arc::new(Mutex::new(Some(self_signing))), user_signing_key: Arc::new(Mutex::new(Some(user))), } } /// Create a new cross signing identity without signing the device that /// created it. 
#[cfg(test)] pub(crate) async fn new(user_id: UserId) -> Self { let master = Signing::new(); let public_key = master.cross_signing_key(user_id.clone(), KeyUsage::Master); let master = MasterSigning { inner: master, public_key: public_key.into(), }; Self::new_helper(&user_id, master).await } /// Mark the identity as shared. pub fn mark_as_shared(&self) { self.shared.store(true, Ordering::SeqCst) } /// Has the identity been shared. /// /// A shared identity here means that the public keys of the identity have /// been uploaded to the server. pub fn shared(&self) -> bool { self.shared.load(Ordering::SeqCst) } /// Store the cross signing identity as a pickle. /// /// # Arguments /// /// * `pickle_key` - The key that should be used to encrypt the signing /// object, must be 32 bytes long. /// /// # Panics /// /// This will panic if the provided pickle key isn't 32 bytes long. pub async fn pickle( &self, pickle_key: &[u8], ) -> Result<PickledCrossSigningIdentity, JsonError> { let master_key = if let Some(m) = self.master_key.lock().await.as_ref() { Some(m.pickle(pickle_key).await) } else { None }; let self_signing_key = if let Some(m) = self.self_signing_key.lock().await.as_ref() { Some(m.pickle(pickle_key).await) } else { None }; let user_signing_key = if let Some(m) = self.user_signing_key.lock().await.as_ref() { Some(m.pickle(pickle_key).await) } else { None }; let pickle = PickledSignings { master_key, user_signing_key, self_signing_key, }; let pickle = serde_json::to_string(&pickle)?; Ok(PickledCrossSigningIdentity { user_id: self.user_id.as_ref().to_owned(), shared: self.shared(), pickle, }) } /// Restore the private cross signing identity from a pickle. /// /// # Panic /// /// Panics if the pickle_key isn't 32 bytes long. 
pub async fn from_pickle( pickle: PickledCrossSigningIdentity, pickle_key: &[u8], ) -> Result<Self, SigningError> { let signings: PickledSignings = serde_json::from_str(&pickle.pickle)?; let master = if let Some(m) = signings.master_key { Some(MasterSigning::from_pickle(m, pickle_key)?) } else { None }; let self_signing = if let Some(s) = signings.self_signing_key { Some(SelfSigning::from_pickle(s, pickle_key)?) } else { None }; let user_signing = if let Some(u) = signings.user_signing_key { Some(UserSigning::from_pickle(u, pickle_key)?) } else { None }; Ok(Self { user_id: Arc::new(pickle.user_id), shared: Arc::new(AtomicBool::from(pickle.shared)), master_key: Arc::new(Mutex::new(master)), self_signing_key: Arc::new(Mutex::new(self_signing)), user_signing_key: Arc::new(Mutex::new(user_signing)), }) } /// Get the upload request that is needed to share the public keys of this /// identity. pub(crate) async fn as_upload_request(&self) -> UploadSigningKeysRequest { let master_key = self .master_key .lock() .await .as_ref() .cloned() .map(|k| k.public_key.into()); let user_signing_key = self .user_signing_key .lock() .await .as_ref() .cloned() .map(|k| k.public_key.into()); let self_signing_key = self .self_signing_key .lock() .await .as_ref() .cloned() .map(|k| k.public_key.into()); UploadSigningKeysRequest { master_key, self_signing_key, user_signing_key, } } } #[cfg(test)] mod test { use crate::{ identities::{ReadOnlyDevice, UserIdentity}, olm::ReadOnlyAccount, }; use std::{collections::BTreeMap, sync::Arc}; use super::{PrivateCrossSigningIdentity, Signing}; use matrix_sdk_common::{ api::r0::keys::CrossSigningKey, identifiers::{user_id, UserId}, }; use matrix_sdk_test::async_test; fn user_id() -> UserId { user_id!("@example:localhost") } fn pickle_key() -> &'static [u8] { &[0u8; 32] } #[test] fn signing_creation() { let signing = Signing::new(); assert!(!signing.public_key().as_str().is_empty()); } #[async_test] async fn signature_verification() { let signing = 
Signing::new(); let message = "Hello world"; let signature = signing.sign(message).await; assert!(signing.verify(message, &signature).await.is_ok()); } #[async_test] async fn pickling_signing() { let signing = Signing::new(); let pickled = signing.pickle(pickle_key()).await; let unpickled = Signing::from_pickle(pickled, pickle_key()).unwrap(); assert_eq!(signing.public_key(), unpickled.public_key()); } #[async_test] async fn private_identity_creation() { let identity = PrivateCrossSigningIdentity::new(user_id()).await; let master_key = identity.master_key.lock().await; let master_key = master_key.as_ref().unwrap(); assert!(master_key .public_key .verify_subkey( &identity .self_signing_key .lock() .await .as_ref() .unwrap() .public_key, ) .is_ok()); assert!(master_key .public_key .verify_subkey( &identity .user_signing_key .lock() .await .as_ref() .unwrap() .public_key, ) .is_ok()); } #[async_test] async fn identity_pickling() { let identity = PrivateCrossSigningIdentity::new(user_id()).await; let pickled = identity.pickle(pickle_key()).await.unwrap(); let unpickled = PrivateCrossSigningIdentity::from_pickle(pickled, pickle_key()) .await .unwrap(); assert_eq!(identity.user_id, unpickled.user_id); assert_eq!( &*identity.master_key.lock().await, &*unpickled.master_key.lock().await ); assert_eq!( &*identity.user_signing_key.lock().await, &*unpickled.user_signing_key.lock().await ); assert_eq!( &*identity.self_signing_key.lock().await, &*unpickled.self_signing_key.lock().await ); } #[async_test] async fn private_identity_signed_by_accound() { let account = ReadOnlyAccount::new(&user_id(), "DEVICEID".into()); let (identity, _, _) = PrivateCrossSigningIdentity::new_with_account(&account).await; let master = identity.master_key.lock().await; let master = master.as_ref().unwrap(); assert!(!master.public_key.signatures().is_empty()); } #[async_test] async fn sign_device() { let account = ReadOnlyAccount::new(&user_id(), "DEVICEID".into()); let (identity, _, _) = 
PrivateCrossSigningIdentity::new_with_account(&account).await; let mut device = ReadOnlyDevice::from_account(&account).await; let self_signing = identity.self_signing_key.lock().await; let self_signing = self_signing.as_ref().unwrap(); let mut device_keys = device.as_device_keys(); self_signing.sign_device(&mut device_keys).await.unwrap(); device.signatures = Arc::new(device_keys.signatures); let public_key = &self_signing.public_key; public_key.verify_device(&device).unwrap() } #[async_test] async fn sign_user_identity() { let account = ReadOnlyAccount::new(&user_id(), "DEVICEID".into()); let (identity, _, _) = PrivateCrossSigningIdentity::new_with_account(&account).await; let bob_account = ReadOnlyAccount::new(&user_id!("@bob:localhost"), "DEVICEID".into()); let (bob_private, _, _) = PrivateCrossSigningIdentity::new_with_account(&bob_account).await; let mut bob_public = UserIdentity::from_private(&bob_private).await; let user_signing = identity.user_signing_key.lock().await; let user_signing = user_signing.as_ref().unwrap(); let signatures = user_signing.sign_user(&bob_public).await.unwrap(); let (key_id, signature) = signatures .iter() .next() .unwrap() .1 .iter() .next() .map(|(k, s)| (k.to_string(), serde_json::from_value(s.to_owned()).unwrap())) .unwrap(); let mut master: CrossSigningKey = bob_public.master_key.as_ref().clone(); master .signatures .entry(identity.user_id().to_owned()) .or_insert_with(BTreeMap::new) .insert(key_id, signature); bob_public.master_key = master.into(); user_signing .public_key .verify_master_key(bob_public.master_key()) .unwrap(); } }
31.618333
100
0.588055
22c9ae606d37aeb7efb31323240703196368c1bd
124,962
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Unicode string manipulation (`str` type) # Basic Usage Rust's string type is one of the core primitive types of the language. While represented by the name `str`, the name `str` is not actually a valid type in Rust. Each string must also be decorated with its ownership. This means that there are three common kinds of strings in rust: * `~str` - This is an owned string. This type obeys all of the normal semantics of the `~T` types, meaning that it has one, and only one, owner. This type cannot be implicitly copied, and is moved out of when passed to other functions. * `@str` - This is a managed string. Similarly to `@T`, this type can be implicitly copied, and each implicit copy will increment the reference count to the string. This means that there is no "true owner" of the string, and the string will be deallocated when the reference count reaches 0. * `&str` - Finally, this is the borrowed string type. This type of string can only be created from one of the other two kinds of strings. As the name "borrowed" implies, this type of string is owned elsewhere, and this string cannot be moved out of. As an example, here's a few different kinds of strings. 
```rust #[feature(managed_boxes)]; fn main() { let owned_string = ~"I am an owned string"; let managed_string = @"This string is garbage-collected"; let borrowed_string1 = "This string is borrowed with the 'static lifetime"; let borrowed_string2: &str = owned_string; // owned strings can be borrowed let borrowed_string3: &str = managed_string; // managed strings can also be borrowed } ``` From the example above, you can see that rust has 3 different kinds of string literals. The owned/managed literals correspond to the owned/managed string types, but the "borrowed literal" is actually more akin to C's concept of a static string. When a string is declared without a `~` or `@` sigil, then the string is allocated statically in the rodata of the executable/library. The string then has the type `&'static str` meaning that the string is valid for the `'static` lifetime, otherwise known as the lifetime of the entire program. As can be inferred from the type, these static strings are not mutable. # Mutability Many languages have immutable strings by default, and rust has a particular flavor on this idea. As with the rest of Rust types, strings are immutable by default. If a string is declared as `mut`, however, it may be mutated. This works the same way as the rest of Rust's type system in the sense that if there's a mutable reference to a string, there may only be one mutable reference to that string. With these guarantees, strings can easily transition between being mutable/immutable with the same benefits of having mutable strings in other languages. ```rust let mut buf = ~"testing"; buf.push_char(' '); buf.push_str("123"); assert_eq!(buf, ~"testing 123"); ``` # Representation Rust's string type, `str`, is a sequence of unicode codepoints encoded as a stream of UTF-8 bytes. All safely-created strings are guaranteed to be validly encoded UTF-8 sequences. Additionally, strings are not null-terminated and can contain null codepoints. 
The actual representation of strings have direct mappings to vectors: * `~str` is the same as `~[u8]` * `&str` is the same as `&[u8]` * `@str` is the same as `@[u8]` */ use at_vec; use cast; use cast::transmute; use char; use char::Char; use clone::{Clone, DeepClone}; use container::{Container, Mutable}; use iter::{Iterator, FromIterator, Extendable, range}; use iter::{Filter, AdditiveIterator, Map}; use iter::{Rev, DoubleEndedIterator, ExactSize}; use libc; use num::{Saturating}; use option::{None, Option, Some}; use ptr; use ptr::RawPtr; use to_str::ToStr; use from_str::FromStr; use uint; use vec; use vec::{OwnedVector, OwnedCopyableVector, ImmutableVector, MutableVector}; use default::Default; use send_str::{SendStr, SendStrOwned}; use unstable::raw::Repr; /* Section: Creating a string */ /// Consumes a vector of bytes to create a new utf-8 string. /// Returns None if the vector contains invalid UTF-8. pub fn from_utf8_owned(vv: ~[u8]) -> Option<~str> { if is_utf8(vv) { Some(unsafe { raw::from_utf8_owned(vv) }) } else { None } } /// Converts a vector to a string slice without performing any allocations. /// /// Once the slice has been validated as utf-8, it is transmuted in-place and /// returned as a '&str' instead of a '&[u8]' /// /// Returns None if the slice is not utf-8. 
pub fn from_utf8<'a>(v: &'a [u8]) -> Option<&'a str> { if is_utf8(v) { Some(unsafe { raw::from_utf8(v) }) } else { None } } impl ToStr for ~str { #[inline] fn to_str(&self) -> ~str { self.to_owned() } } impl FromStr for ~str { #[inline] fn from_str(s: &str) -> Option<~str> { Some(s.to_owned()) } } impl<'a> ToStr for &'a str { #[inline] fn to_str(&self) -> ~str { self.to_owned() } } impl ToStr for @str { #[inline] fn to_str(&self) -> ~str { self.to_owned() } } impl<'a> FromStr for @str { #[inline] fn from_str(s: &str) -> Option<@str> { Some(s.to_managed()) } } /// Convert a byte to a UTF-8 string /// /// # Failure /// /// Fails if invalid UTF-8 pub fn from_byte(b: u8) -> ~str { assert!(b < 128u8); unsafe { ::cast::transmute(~[b]) } } /// Convert a char to a string pub fn from_char(ch: char) -> ~str { let mut buf = ~""; buf.push_char(ch); buf } /// Convert a vector of chars to a string pub fn from_chars(chs: &[char]) -> ~str { let mut buf = ~""; buf.reserve(chs.len()); for ch in chs.iter() { buf.push_char(*ch) } buf } #[doc(hidden)] pub fn push_str(lhs: &mut ~str, rhs: &str) { lhs.push_str(rhs) } /// Methods for vectors of strings pub trait StrVector { /// Concatenate a vector of strings. fn concat(&self) -> ~str; /// Concatenate a vector of strings, placing a given separator between each. 
fn connect(&self, sep: &str) -> ~str; } impl<'a, S: Str> StrVector for &'a [S] { fn concat(&self) -> ~str { if self.is_empty() { return ~""; } // `len` calculation may overflow but push_str but will check boundaries let len = self.iter().map(|s| s.as_slice().len()).sum(); let mut result = with_capacity(len); for s in self.iter() { result.push_str(s.as_slice()) } result } fn connect(&self, sep: &str) -> ~str { if self.is_empty() { return ~""; } // concat is faster if sep.is_empty() { return self.concat(); } // this is wrong without the guarantee that `self` is non-empty // `len` calculation may overflow but push_str but will check boundaries let len = sep.len() * (self.len() - 1) + self.iter().map(|s| s.as_slice().len()).sum(); let mut result = with_capacity(len); let mut first = true; for s in self.iter() { if first { first = false; } else { result.push_str(sep); } result.push_str(s.as_slice()); } result } } /// Something that can be used to compare against a character pub trait CharEq { /// Determine if the splitter should split at the given character fn matches(&self, char) -> bool; /// Indicate if this is only concerned about ASCII characters, /// which can allow for a faster implementation. fn only_ascii(&self) -> bool; } impl CharEq for char { #[inline] fn matches(&self, c: char) -> bool { *self == c } fn only_ascii(&self) -> bool { (*self as uint) < 128 } } impl<'a> CharEq for 'a |char| -> bool { #[inline] fn matches(&self, c: char) -> bool { (*self)(c) } fn only_ascii(&self) -> bool { false } } impl CharEq for extern "Rust" fn(char) -> bool { #[inline] fn matches(&self, c: char) -> bool { (*self)(c) } fn only_ascii(&self) -> bool { false } } impl<'a, C: CharEq> CharEq for &'a [C] { #[inline] fn matches(&self, c: char) -> bool { self.iter().any(|m| m.matches(c)) } fn only_ascii(&self) -> bool { self.iter().all(|m| m.only_ascii()) } } /* Section: Iterators */ /// External iterator for a string's characters. /// Use with the `std::iter` module. 
#[deriving(Clone)] pub struct Chars<'a> { /// The slice remaining to be iterated priv string: &'a str, } impl<'a> Iterator<char> for Chars<'a> { #[inline] fn next(&mut self) -> Option<char> { // Decode the next codepoint, then update // the slice to be just the remaining part if self.string.len() != 0 { let CharRange {ch, next} = self.string.char_range_at(0); unsafe { self.string = raw::slice_unchecked(self.string, next, self.string.len()); } Some(ch) } else { None } } #[inline] fn size_hint(&self) -> (uint, Option<uint>) { (self.string.len().saturating_add(3)/4, Some(self.string.len())) } } impl<'a> DoubleEndedIterator<char> for Chars<'a> { #[inline] fn next_back(&mut self) -> Option<char> { if self.string.len() != 0 { let CharRange {ch, next} = self.string.char_range_at_reverse(self.string.len()); unsafe { self.string = raw::slice_unchecked(self.string, 0, next); } Some(ch) } else { None } } } /// External iterator for a string's characters and their byte offsets. /// Use with the `std::iter` module. #[deriving(Clone)] pub struct CharOffsets<'a> { /// The original string to be iterated priv string: &'a str, priv iter: Chars<'a>, } impl<'a> Iterator<(uint, char)> for CharOffsets<'a> { #[inline] fn next(&mut self) -> Option<(uint, char)> { // Compute the byte offset by using the pointer offset between // the original string slice and the iterator's remaining part let offset = self.iter.string.as_ptr() as uint - self.string.as_ptr() as uint; self.iter.next().map(|ch| (offset, ch)) } #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() } } impl<'a> DoubleEndedIterator<(uint, char)> for CharOffsets<'a> { #[inline] fn next_back(&mut self) -> Option<(uint, char)> { self.iter.next_back().map(|ch| { let offset = self.iter.string.len() + self.iter.string.as_ptr() as uint - self.string.as_ptr() as uint; (offset, ch) }) } } /// External iterator for a string's characters in reverse order. /// Use with the `std::iter` module. 
pub type RevChars<'a> = Rev<Chars<'a>>; /// External iterator for a string's characters and their byte offsets in reverse order. /// Use with the `std::iter` module. pub type RevCharOffsets<'a> = Rev<CharOffsets<'a>>; /// External iterator for a string's bytes. /// Use with the `std::iter` module. pub type Bytes<'a> = Map<'a, &'a u8, u8, vec::Items<'a, u8>>; /// External iterator for a string's bytes in reverse order. /// Use with the `std::iter` module. pub type RevBytes<'a> = Rev<Bytes<'a>>; /// An iterator over the substrings of a string, separated by `sep`. #[deriving(Clone)] pub struct CharSplits<'a, Sep> { /// The slice remaining to be iterated priv string: &'a str, priv sep: Sep, /// Whether an empty string at the end is allowed priv allow_trailing_empty: bool, priv only_ascii: bool, priv finished: bool, } /// An iterator over the substrings of a string, separated by `sep`, /// starting from the back of the string. pub type RevCharSplits<'a, Sep> = Rev<CharSplits<'a, Sep>>; /// An iterator over the substrings of a string, separated by `sep`, /// splitting at most `count` times. #[deriving(Clone)] pub struct CharSplitsN<'a, Sep> { priv iter: CharSplits<'a, Sep>, /// The number of splits remaining priv count: uint, priv invert: bool, } /// An iterator over the words of a string, separated by an sequence of whitespace pub type Words<'a> = Filter<'a, &'a str, CharSplits<'a, extern "Rust" fn(char) -> bool>>; /// An iterator over the lines of a string, separated by either `\n` or (`\r\n`). 
pub type AnyLines<'a> = Map<'a, &'a str, &'a str, CharSplits<'a, char>>; impl<'a, Sep> CharSplits<'a, Sep> { #[inline] fn get_end(&mut self) -> Option<&'a str> { if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) { self.finished = true; Some(self.string) } else { None } } } impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> { #[inline] fn next(&mut self) -> Option<&'a str> { if self.finished { return None } let mut next_split = None; if self.only_ascii { for (idx, byte) in self.string.bytes().enumerate() { if self.sep.matches(byte as char) && byte < 128u8 { next_split = Some((idx, idx + 1)); break; } } } else { for (idx, ch) in self.string.char_indices() { if self.sep.matches(ch) { next_split = Some((idx, self.string.char_range_at(idx).next)); break; } } } match next_split { Some((a, b)) => unsafe { let elt = raw::slice_unchecked(self.string, 0, a); self.string = raw::slice_unchecked(self.string, b, self.string.len()); Some(elt) }, None => self.get_end(), } } } impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str> for CharSplits<'a, Sep> { #[inline] fn next_back(&mut self) -> Option<&'a str> { if self.finished { return None } if !self.allow_trailing_empty { self.allow_trailing_empty = true; match self.next_back() { Some(elt) if !elt.is_empty() => return Some(elt), _ => if self.finished { return None } } } let len = self.string.len(); let mut next_split = None; if self.only_ascii { for (idx, byte) in self.string.bytes().enumerate().rev() { if self.sep.matches(byte as char) && byte < 128u8 { next_split = Some((idx, idx + 1)); break; } } } else { for (idx, ch) in self.string.char_indices_rev() { if self.sep.matches(ch) { next_split = Some((idx, self.string.char_range_at(idx).next)); break; } } } match next_split { Some((a, b)) => unsafe { let elt = raw::slice_unchecked(self.string, b, len); self.string = raw::slice_unchecked(self.string, 0, a); Some(elt) }, None => { self.finished = true; Some(self.string) } } } } impl<'a, Sep: CharEq> 
Iterator<&'a str> for CharSplitsN<'a, Sep> { #[inline] fn next(&mut self) -> Option<&'a str> { if self.count != 0 { self.count -= 1; if self.invert { self.iter.next_back() } else { self.iter.next() } } else { self.iter.get_end() } } } /// An iterator over the start and end indices of the matches of a /// substring within a larger string #[deriving(Clone)] pub struct MatchIndices<'a> { priv haystack: &'a str, priv needle: &'a str, priv position: uint, } /// An iterator over the substrings of a string separated by a given /// search string #[deriving(Clone)] pub struct StrSplits<'a> { priv it: MatchIndices<'a>, priv last_end: uint, priv finished: bool } impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> { #[inline] fn next(&mut self) -> Option<(uint, uint)> { // See Issue #1932 for why this is a naive search let (h_len, n_len) = (self.haystack.len(), self.needle.len()); let mut match_start = 0; let mut match_i = 0; while self.position < h_len { if self.haystack[self.position] == self.needle[match_i] { if match_i == 0 { match_start = self.position; } match_i += 1; self.position += 1; if match_i == n_len { // found a match! 
return Some((match_start, self.position)); } } else { // failed match, backtrack if match_i > 0 { match_i = 0; self.position = match_start; } self.position += 1; } } None } } impl<'a> Iterator<&'a str> for StrSplits<'a> { #[inline] fn next(&mut self) -> Option<&'a str> { if self.finished { return None; } match self.it.next() { Some((from, to)) => { let ret = Some(self.it.haystack.slice(self.last_end, from)); self.last_end = to; ret } None => { self.finished = true; Some(self.it.haystack.slice(self.last_end, self.it.haystack.len())) } } } } // Helper functions used for Unicode normalization fn canonical_sort(comb: &mut [(char, u8)]) { use iter::range; use tuple::CopyableTuple; let len = comb.len(); for i in range(0, len) { let mut swapped = false; for j in range(1, len-i) { let classA = comb[j-1].second(); let classB = comb[j].second(); if classA != 0 && classB != 0 && classA > classB { comb.swap(j-1, j); swapped = true; } } if !swapped { break; } } } #[deriving(Clone)] enum NormalizationForm { NFD, NFKD } /// External iterator for a string's normalization's characters. /// Use with the `std::iter` module. 
#[deriving(Clone)] struct Normalizations<'a> { priv kind: NormalizationForm, priv iter: Chars<'a>, priv buffer: ~[(char, u8)], priv sorted: bool } impl<'a> Iterator<char> for Normalizations<'a> { #[inline] fn next(&mut self) -> Option<char> { use unicode::decompose::canonical_combining_class; match self.buffer.head() { Some(&(c, 0)) => { self.sorted = false; self.buffer.shift(); return Some(c); } Some(&(c, _)) if self.sorted => { self.buffer.shift(); return Some(c); } _ => self.sorted = false } let decomposer = match self.kind { NFD => char::decompose_canonical, NFKD => char::decompose_compatible }; if !self.sorted { for ch in self.iter { decomposer(ch, |d| { let class = canonical_combining_class(d); if class == 0 && !self.sorted { canonical_sort(self.buffer); self.sorted = true; } self.buffer.push((d, class)); }); if self.sorted { break } } } if !self.sorted { canonical_sort(self.buffer); self.sorted = true; } match self.buffer.shift() { Some((c, 0)) => { self.sorted = false; Some(c) } Some((c, _)) => Some(c), None => None } } fn size_hint(&self) -> (uint, Option<uint>) { let (lower, _) = self.iter.size_hint(); (lower, None) } } /// Replace all occurrences of one string with another /// /// # Arguments /// /// * s - The string containing substrings to replace /// * from - The string to replace /// * to - The replacement string /// /// # Return value /// /// The original string with all occurances of `from` replaced with `to` pub fn replace(s: &str, from: &str, to: &str) -> ~str { let mut result = ~""; let mut last_end = 0; for (start, end) in s.match_indices(from) { result.push_str(unsafe{raw::slice_bytes(s, last_end, start)}); result.push_str(to); last_end = end; } result.push_str(unsafe{raw::slice_bytes(s, last_end, s.len())}); result } /* Section: Comparing strings */ // share the implementation of the lang-item vs. non-lang-item // eq_slice. 
#[inline] fn eq_slice_(a: &str, b: &str) -> bool { a.len() == b.len() && unsafe { libc::memcmp(a.as_ptr() as *libc::c_void, b.as_ptr() as *libc::c_void, a.len() as libc::size_t) == 0 } } /// Bytewise slice equality #[cfg(not(test))] #[lang="str_eq"] #[inline] pub fn eq_slice(a: &str, b: &str) -> bool { eq_slice_(a, b) } /// Bytewise slice equality #[cfg(test)] #[inline] pub fn eq_slice(a: &str, b: &str) -> bool { eq_slice_(a, b) } /// Bytewise string equality #[cfg(not(test))] #[lang="uniq_str_eq"] #[inline] pub fn eq(a: &~str, b: &~str) -> bool { eq_slice(*a, *b) } #[cfg(test)] #[inline] pub fn eq(a: &~str, b: &~str) -> bool { eq_slice(*a, *b) } /* Section: Misc */ /// Determines if a vector of bytes contains valid UTF-8 pub fn is_utf8(v: &[u8]) -> bool { let mut i = 0u; let total = v.len(); fn unsafe_get(xs: &[u8], i: uint) -> u8 { unsafe { *xs.unsafe_ref(i) } } while i < total { let v_i = unsafe_get(v, i); if v_i < 128u8 { i += 1u; } else { let w = utf8_char_width(v_i); if w == 0u { return false; } let nexti = i + w; if nexti > total { return false; } // 2-byte encoding is for codepoints \u0080 to \u07ff // first C2 80 last DF BF // 3-byte encoding is for codepoints \u0800 to \uffff // first E0 A0 80 last EF BF BF // excluding surrogates codepoints \ud800 to \udfff // ED A0 80 to ED BF BF // 4-byte encoding is for codepoints \u10000 to \u10ffff // first F0 90 80 80 last F4 8F BF BF // // Use the UTF-8 syntax from the RFC // // https://tools.ietf.org/html/rfc3629 // UTF8-1 = %x00-7F // UTF8-2 = %xC2-DF UTF8-tail // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) / // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail ) // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) / // %xF4 %x80-8F 2( UTF8-tail ) // UTF8-tail = %x80-BF match w { 2 => if unsafe_get(v, i + 1) & 192u8 != TAG_CONT_U8 { return false }, 3 => match (v_i, unsafe_get(v, i + 1), unsafe_get(v, i + 2) & 192u8) { (0xE0 , 0xA0 .. 0xBF, TAG_CONT_U8) => (), (0xE1 .. 0xEC, 0x80 .. 
0xBF, TAG_CONT_U8) => (), (0xED , 0x80 .. 0x9F, TAG_CONT_U8) => (), (0xEE .. 0xEF, 0x80 .. 0xBF, TAG_CONT_U8) => (), _ => return false, }, _ => match (v_i, unsafe_get(v, i + 1), unsafe_get(v, i + 2) & 192u8, unsafe_get(v, i + 3) & 192u8) { (0xF0 , 0x90 .. 0xBF, TAG_CONT_U8, TAG_CONT_U8) => (), (0xF1 .. 0xF3, 0x80 .. 0xBF, TAG_CONT_U8, TAG_CONT_U8) => (), (0xF4 , 0x80 .. 0x8F, TAG_CONT_U8, TAG_CONT_U8) => (), _ => return false, }, } i = nexti; } } true } /// Determines if a vector of `u16` contains valid UTF-16 pub fn is_utf16(v: &[u16]) -> bool { let len = v.len(); let mut i = 0u; while i < len { let u = v[i]; if u <= 0xD7FF_u16 || u >= 0xE000_u16 { i += 1u; } else { if i+1u < len { return false; } let u2 = v[i+1u]; if u < 0xD7FF_u16 || u > 0xDBFF_u16 { return false; } if u2 < 0xDC00_u16 || u2 > 0xDFFF_u16 { return false; } i += 2u; } } return true; } /// Iterates over the utf-16 characters in the specified slice, yielding each /// decoded unicode character to the function provided. /// /// # Failures /// /// * Fails on invalid utf-16 data pub fn utf16_chars(v: &[u16], f: |char|) { let len = v.len(); let mut i = 0u; while i < len && v[i] != 0u16 { let u = v[i]; if u <= 0xD7FF_u16 || u >= 0xE000_u16 { f(unsafe { cast::transmute(u as u32) }); i += 1u; } else { let u2 = v[i+1u]; assert!(u >= 0xD800_u16 && u <= 0xDBFF_u16); assert!(u2 >= 0xDC00_u16 && u2 <= 0xDFFF_u16); let mut c: u32 = (u - 0xD800_u16) as u32; c = c << 10; c |= (u2 - 0xDC00_u16) as u32; c |= 0x1_0000_u32; f(unsafe { cast::transmute(c) }); i += 2u; } } } /// Allocates a new string from the utf-16 slice provided pub fn from_utf16(v: &[u16]) -> ~str { let mut buf = ~""; buf.reserve(v.len()); utf16_chars(v, |ch| buf.push_char(ch)); buf } /// Allocates a new string with the specified capacity. The string returned is /// the empty string, but has capacity for much more. 
#[inline]
pub fn with_capacity(capacity: uint) -> ~str {
    // ~str shares its representation with ~[u8], so build the vector and
    // transmute; the empty string is trivially valid UTF-8.
    unsafe {
        cast::transmute(vec::with_capacity::<~[u8]>(capacity))
    }
}

// Maps a leading byte to the total length (1-4) of its UTF-8 sequence;
// 0 marks bytes that cannot begin a sequence.
// https://tools.ietf.org/html/rfc3629
static UTF8_CHAR_WIDTH: [u8, ..256] = [
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
];

/// Given a first byte, determine how many bytes are in this UTF-8 character
pub fn utf8_char_width(b: u8) -> uint {
    return UTF8_CHAR_WIDTH[b] as uint;
}

/// Struct that contains a `char` and the index of the first byte of
/// the next `char` in a string. This can be used as a data structure
/// for iterating over the UTF-8 bytes of a string.
pub struct CharRange {
    /// Current `char`
    ch: char,
    /// Index of the first byte of the next `char`
    next: uint
}

// Return the initial codepoint accumulator for the first byte.
// The first byte is special, only want bottom 5 bits for width 2, 4 bits
// for width 3, and 3 bits for width 4
macro_rules! utf8_first_byte(
    ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as uint)
)

// return the value of $ch updated with continuation byte $byte
macro_rules! utf8_acc_cont_byte(
    ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & 63u8) as uint)
)

// Bit pattern (10xxxxxx) identifying a UTF-8 continuation byte after
// masking with 192 (0b11000000).
static TAG_CONT_U8: u8 = 128u8;

/// Unsafe operations
pub mod raw {
    use cast;
    use container::Container;
    use libc;
    use ptr;
    use ptr::RawPtr;
    use str::{is_utf8, OwnedStr, StrSlice};
    use vec;
    use vec::{MutableVector, ImmutableVector, OwnedVector};
    use unstable::raw::Slice;

    /// Create a Rust string from a *u8 buffer of the given length
    pub unsafe fn from_buf_len(buf: *u8, len: uint) -> ~str {
        let mut v: ~[u8] = vec::with_capacity(len);
        ptr::copy_memory(v.as_mut_ptr(), buf, len);
        v.set_len(len);
        // Copied bytes must form valid UTF-8 to be viewed as a string.
        assert!(is_utf8(v));
        ::cast::transmute(v)
    }

    #[lang="strdup_uniq"]
    #[cfg(not(test))]
    #[allow(missing_doc)]
    #[inline]
    pub unsafe fn strdup_uniq(ptr: *u8, len: uint) -> ~str {
        from_buf_len(ptr, len)
    }

    /// Create a Rust string from a null-terminated C string
    pub unsafe fn from_c_str(buf: *libc::c_char) -> ~str {
        // Scan for the NUL terminator to find the length, then copy.
        let mut curr = buf;
        let mut i = 0;
        while *curr != 0 {
            i += 1;
            curr = ptr::offset(buf, i);
        }
        from_buf_len(buf as *u8, i as uint)
    }

    /// Converts a slice of bytes to a string slice without checking
    /// that the string contains valid UTF-8.
    pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
        cast::transmute(v)
    }

    /// Converts an owned vector of bytes to a new owned string. This assumes
    /// that the utf-8-ness of the vector has already been validated
    #[inline]
    pub unsafe fn from_utf8_owned(v: ~[u8]) -> ~str {
        cast::transmute(v)
    }

    /// Converts a byte to a string.
    pub unsafe fn from_byte(u: u8) -> ~str {
        from_utf8_owned(~[u])
    }

    /// Form a slice from a C string. Unsafe because the caller must ensure the
    /// C string has the static lifetime, or else the return value may be
    /// invalidated later.
    pub unsafe fn c_str_to_static_slice(s: *libc::c_char) -> &'static str {
        let s = s as *u8;
        let mut curr = s;
        let mut len = 0u;
        // Measure up to (not including) the NUL terminator.
        while *curr != 0u8 {
            len += 1u;
            curr = ptr::offset(s, len as int);
        }
        let v = Slice { data: s, len: len };
        assert!(is_utf8(::cast::transmute(v)));
        ::cast::transmute(v)
    }

    /// Takes a bytewise (not UTF-8) slice from a string.
    ///
    /// Returns the substring from [`begin`..`end`).
    ///
    /// # Failure
    ///
    /// If begin is greater than end.
    /// If end is greater than the length of the string.
    #[inline]
    pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
        assert!(begin <= end);
        assert!(end <= s.len());
        slice_unchecked(s, begin, end)
    }

    /// Takes a bytewise (not UTF-8) slice from a string.
    ///
    /// Returns the substring from [`begin`..`end`).
    ///
    /// Caller must check slice boundaries!
    #[inline]
    pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
        cast::transmute(Slice {
            data: s.as_ptr().offset(begin as int),
            len: end - begin,
        })
    }

    /// Appends a byte to a string.
    /// The caller must preserve the valid UTF-8 property.
    #[inline]
    pub unsafe fn push_byte(s: &mut ~str, b: u8) {
        as_owned_vec(s).push(b)
    }

    /// Appends a vector of bytes to a string.
    /// The caller must preserve the valid UTF-8 property.
    #[inline]
    pub unsafe fn push_bytes(s: &mut ~str, bytes: &[u8]) {
        vec::bytes::push_bytes(as_owned_vec(s), bytes);
    }

    /// Removes the last byte from a string and returns it.
    /// The caller must preserve the valid UTF-8 property.
    pub unsafe fn pop_byte(s: &mut ~str) -> u8 {
        let len = s.len();
        assert!((len > 0u));
        let b = s[len - 1u];
        s.set_len(len - 1);
        return b;
    }

    /// Removes the first byte from a string and returns it.
    /// The caller must preserve the valid UTF-8 property.
    pub unsafe fn shift_byte(s: &mut ~str) -> u8 {
        // NOTE(review): unlike pop_byte, this reallocates — it copies the
        // remaining bytes into a fresh owned string.
        let len = s.len();
        assert!((len > 0u));
        let b = s[0];
        *s = s.slice(1, len).to_owned();
        return b;
    }

    /// Access the str in its vector representation.
    /// The caller must preserve the valid UTF-8 property when modifying.
    #[inline]
    pub unsafe fn as_owned_vec<'a>(s: &'a mut ~str) -> &'a mut ~[u8] {
        cast::transmute(s)
    }

    /// Sets the length of a string
    ///
    /// This will explicitly set the size of the string, without actually
    /// modifying its buffers, so it is up to the caller to ensure that
    /// the string is actually the specified size.
    // NOTE(review): the doc comment above appears to describe a `set_len`
    // function that is not present here; it currently precedes the test below.
    #[test]
    fn test_from_buf_len() {
        unsafe {
            let a = ~[65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8];
            let b = a.as_ptr();
            let c = from_buf_len(b, 3u);
            assert_eq!(c, ~"AAA");
        }
    }
}

/* Section: Trait implementations */

#[cfg(not(test))]
#[allow(missing_doc)]
pub mod traits {
    use container::Container;
    use cmp::{TotalOrd, Ordering, Less, Equal, Greater, Eq, Ord, Equiv, TotalEq};
    use iter::Iterator;
    use ops::Add;
    use option::{Some, None};
    use str::{Str, StrSlice, OwnedStr, eq_slice};

    impl<'a> Add<&'a str,~str> for &'a str {
        #[inline]
        fn add(&self, rhs: & &'a str) -> ~str {
            let mut ret = self.to_owned();
            ret.push_str(*rhs);
            ret
        }
    }

    impl<'a> TotalOrd for &'a str {
        #[inline]
        fn cmp(&self, other: & &'a str) -> Ordering {
            // Lexicographic byte comparison; for UTF-8 this matches code
            // point order. A common prefix defers to the shorter string.
            for (s_b, o_b) in self.bytes().zip(other.bytes()) {
                match s_b.cmp(&o_b) {
                    Greater => return Greater,
                    Less => return Less,
                    Equal => ()
                }
            }
            self.len().cmp(&other.len())
        }
    }

    impl TotalOrd for ~str {
        #[inline]
        fn cmp(&self, other: &~str) -> Ordering {
            self.as_slice().cmp(&other.as_slice())
        }
    }

    impl TotalOrd for @str {
        #[inline]
        fn cmp(&self, other: &@str) -> Ordering {
            self.as_slice().cmp(&other.as_slice())
        }
    }

    impl<'a> Eq for &'a str {
        #[inline]
        fn eq(&self, other: & &'a str) -> bool {
            eq_slice((*self), (*other))
        }
        #[inline]
        fn ne(&self, other: & &'a str) -> bool {
            !(*self).eq(other)
        }
    }

    impl Eq for ~str {
        #[inline]
        fn eq(&self, other: &~str) -> bool {
            eq_slice((*self), (*other))
        }
    }

    impl Eq for @str {
        #[inline]
        fn eq(&self, other: &@str) -> bool {
            eq_slice((*self), (*other))
        }
    }

    impl<'a> TotalEq for &'a str {
        #[inline]
        fn equals(&self, other: & &'a str) -> bool {
            eq_slice((*self),
                     (*other))
        }
    }

    impl TotalEq for ~str {
        #[inline]
        fn equals(&self, other: &~str) -> bool {
            eq_slice((*self), (*other))
        }
    }

    impl TotalEq for @str {
        #[inline]
        fn equals(&self, other: &@str) -> bool {
            eq_slice((*self), (*other))
        }
    }

    impl<'a> Ord for &'a str {
        #[inline]
        fn lt(&self, other: & &'a str) -> bool {
            self.cmp(other) == Less
        }
    }

    impl Ord for ~str {
        #[inline]
        fn lt(&self, other: &~str) -> bool {
            self.cmp(other) == Less
        }
    }

    impl Ord for @str {
        #[inline]
        fn lt(&self, other: &@str) -> bool {
            self.cmp(other) == Less
        }
    }

    // Equiv allows comparing any Str-implementing type against a slice
    // without allocating.
    impl<'a, S: Str> Equiv<S> for &'a str {
        #[inline]
        fn equiv(&self, other: &S) -> bool {
            eq_slice(*self, other.as_slice())
        }
    }

    impl<'a, S: Str> Equiv<S> for @str {
        #[inline]
        fn equiv(&self, other: &S) -> bool {
            eq_slice(*self, other.as_slice())
        }
    }

    impl<'a, S: Str> Equiv<S> for ~str {
        #[inline]
        fn equiv(&self, other: &S) -> bool {
            eq_slice(*self, other.as_slice())
        }
    }
}

#[cfg(test)]
pub mod traits {}

/// Any string that can be represented as a slice
pub trait Str {
    /// Work with `self` as a slice.
    fn as_slice<'a>(&'a self) -> &'a str;

    /// Convert `self` into a ~str, not making a copy if possible
    fn into_owned(self) -> ~str;
}

impl<'a> Str for &'a str {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str {
        *self
    }

    #[inline]
    fn into_owned(self) -> ~str {
        self.to_owned()
    }
}

impl<'a> Str for ~str {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str {
        let s: &'a str = *self;
        s
    }

    #[inline]
    fn into_owned(self) -> ~str {
        // Already owned: hand back self without copying.
        self
    }
}

impl<'a> Str for @str {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str {
        let s: &'a str = *self;
        s
    }

    #[inline]
    fn into_owned(self) -> ~str {
        self.to_owned()
    }
}

impl<'a> Container for &'a str {
    #[inline]
    fn len(&self) -> uint {
        // Byte length, read straight from the slice representation.
        self.repr().len
    }
}

impl Container for ~str {
    #[inline]
    fn len(&self) -> uint {
        self.as_slice().len()
    }
}

impl Container for @str {
    #[inline]
    fn len(&self) -> uint {
        self.as_slice().len()
    }
}

impl Mutable for ~str {
    /// Remove all content, make the string empty
    #[inline]
    fn clear(&mut self) {
        unsafe {
            self.set_len(0)
        }
    }
}

/// Methods for string slices
pub trait StrSlice<'a> {
    /// Returns true if one string contains another
    ///
    /// # Arguments
    ///
    /// - needle - The string to look for
    fn contains<'a>(&self, needle: &'a str) -> bool;

    /// Returns true if a string contains a char.
    ///
    /// # Arguments
    ///
    /// - needle - The char to look for
    fn contains_char(&self, needle: char) -> bool;

    /// An iterator over the characters of `self`. Note, this iterates
    /// over unicode code-points, not unicode graphemes.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[char] = "abc åäö".chars().collect();
    /// assert_eq!(v, ~['a', 'b', 'c', ' ', 'å', 'ä', 'ö']);
    /// ```
    fn chars(&self) -> Chars<'a>;

    /// An iterator over the characters of `self`, in reverse order.
    fn chars_rev(&self) -> RevChars<'a>;

    /// An iterator over the bytes of `self`
    fn bytes(&self) -> Bytes<'a>;

    /// An iterator over the bytes of `self`, in reverse order
    fn bytes_rev(&self) -> RevBytes<'a>;

    /// An iterator over the characters of `self` and their byte offsets.
    fn char_indices(&self) -> CharOffsets<'a>;

    /// An iterator over the characters of `self` and their byte offsets,
    /// in reverse order.
    fn char_indices_rev(&self) -> RevCharOffsets<'a>;

    /// An iterator over substrings of `self`, separated by characters
    /// matched by `sep`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "Mary had a little lamb".split(' ').collect();
    /// assert_eq!(v, ~["Mary", "had", "a", "little", "lamb"]);
    ///
    /// let v: ~[&str] = "abc1def2ghi".split(|c: char| c.is_digit()).collect();
    /// assert_eq!(v, ~["abc", "def", "ghi"]);
    ///
    /// let v: ~[&str] = "lionXXtigerXleopard".split('X').collect();
    /// assert_eq!(v, ~["lion", "", "tiger", "leopard"]);
    /// ```
    fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<'a, Sep>;

    /// An iterator over substrings of `self`, separated by characters
    /// matched by `sep`, restricted to splitting at most `count`
    /// times.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "Mary had a little lambda".splitn(' ', 2).collect();
    /// assert_eq!(v, ~["Mary", "had", "a little lambda"]);
    ///
    /// let v: ~[&str] = "abc1def2ghi".splitn(|c: char| c.is_digit(), 1).collect();
    /// assert_eq!(v, ~["abc", "def2ghi"]);
    ///
    /// let v: ~[&str] = "lionXXtigerXleopard".splitn('X', 2).collect();
    /// assert_eq!(v, ~["lion", "", "tigerXleopard"]);
    /// ```
    fn splitn<Sep: CharEq>(&self, sep: Sep, count: uint) -> CharSplitsN<'a, Sep>;

    /// An iterator over substrings of `self`, separated by characters
    /// matched by `sep`.
    ///
    /// Equivalent to `split`, except that the trailing substring
    /// is skipped if empty (terminator semantics).
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "A.B.".split_terminator('.').collect();
    /// assert_eq!(v, ~["A", "B"]);
    ///
    /// let v: ~[&str] = "A..B..".split_terminator('.').collect();
    /// assert_eq!(v, ~["A", "", "B", ""]);
    /// ```
    fn split_terminator<Sep: CharEq>(&self, sep: Sep) -> CharSplits<'a, Sep>;

    /// An iterator over substrings of `self`, separated by characters
    /// matched by `sep`, in reverse order.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "Mary had a little lamb".rsplit(' ').collect();
    /// assert_eq!(v, ~["lamb", "little", "a", "had", "Mary"]);
    ///
    /// let v: ~[&str] = "abc1def2ghi".rsplit(|c: char| c.is_digit()).collect();
    /// assert_eq!(v, ~["ghi", "def", "abc"]);
    ///
    /// let v: ~[&str] = "lionXXtigerXleopard".rsplit('X').collect();
    /// assert_eq!(v, ~["leopard", "tiger", "", "lion"]);
    /// ```
    fn rsplit<Sep: CharEq>(&self, sep: Sep) -> RevCharSplits<'a, Sep>;

    /// An iterator over substrings of `self`, separated by characters
    /// matched by `sep`, starting from the end of the string.
    /// Restricted to splitting at most `count` times.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "Mary had a little lamb".rsplitn(' ', 2).collect();
    /// assert_eq!(v, ~["lamb", "little", "Mary had a"]);
    ///
    /// let v: ~[&str] = "abc1def2ghi".rsplitn(|c: char| c.is_digit(), 1).collect();
    /// assert_eq!(v, ~["ghi", "abc1def"]);
    ///
    /// let v: ~[&str] = "lionXXtigerXleopard".rsplitn('X', 2).collect();
    /// assert_eq!(v, ~["leopard", "tiger", "lionX"]);
    /// ```
    fn rsplitn<Sep: CharEq>(&self, sep: Sep, count: uint) -> CharSplitsN<'a, Sep>;

    /// An iterator over the start and end indices of the disjoint
    /// matches of `sep` within `self`.
    ///
    /// That is, each returned value `(start, end)` satisfies
    /// `self.slice(start, end) == sep`. For matches of `sep` within
    /// `self` that overlap, only the indices corresponding to the
    /// first match are returned.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[(uint, uint)] = "abcXXXabcYYYabc".match_indices("abc").collect();
    /// assert_eq!(v, ~[(0,3), (6,9), (12,15)]);
    ///
    /// let v: ~[(uint, uint)] = "1abcabc2".match_indices("abc").collect();
    /// assert_eq!(v, ~[(1,4), (4,7)]);
    ///
    /// let v: ~[(uint, uint)] = "ababa".match_indices("aba").collect();
    /// assert_eq!(v, ~[(0, 3)]); // only the first `aba`
    /// ```
    fn match_indices(&self, sep: &'a str) -> MatchIndices<'a>;

    /// An iterator over the substrings of `self` separated by `sep`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let v: ~[&str] = "abcXXXabcYYYabc".split_str("abc").collect();
    /// assert_eq!(v, ~["", "XXX", "YYY", ""]);
    ///
    /// let v: ~[&str] = "1abcabc2".split_str("abc").collect();
    /// assert_eq!(v, ~["1", "", "2"]);
    /// ```
    fn split_str(&self, &'a str) -> StrSplits<'a>;

    /// An iterator over the lines of a string (subsequences separated
    /// by `\n`). This does not include the empty string after a
    /// trailing `\n`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let four_lines = "foo\nbar\n\nbaz\n";
    /// let v: ~[&str] = four_lines.lines().collect();
    /// assert_eq!(v, ~["foo", "bar", "", "baz"]);
    /// ```
    fn lines(&self) -> CharSplits<'a, char>;

    /// An iterator over the lines of a string, separated by either
    /// `\n` or `\r\n`. As with `.lines()`, this does not include an
    /// empty trailing line.
    ///
    /// # Example
    ///
    /// ```rust
    /// let four_lines = "foo\r\nbar\n\r\nbaz\n";
    /// let v: ~[&str] = four_lines.lines_any().collect();
    /// assert_eq!(v, ~["foo", "bar", "", "baz"]);
    /// ```
    fn lines_any(&self) -> AnyLines<'a>;

    /// An iterator over the words of a string (subsequences separated
    /// by any sequence of whitespace). Sequences of whitespace are
    /// collapsed, so empty "words" are not included.
    ///
    /// # Example
    ///
    /// ```rust
    /// let some_words = " Mary  had\ta little  \n\t lamb";
    /// let v: ~[&str] = some_words.words().collect();
    /// assert_eq!(v, ~["Mary", "had", "a", "little", "lamb"]);
    /// ```
    fn words(&self) -> Words<'a>;

    /// An Iterator over the string in Unicode Normalization Form D
    /// (canonical decomposition).
    fn nfd_chars(&self) -> Normalizations<'a>;

    /// An Iterator over the string in Unicode Normalization Form KD
    /// (compatibility decomposition).
    fn nfkd_chars(&self) -> Normalizations<'a>;

    /// Returns true if the string contains only whitespace.
    ///
    /// Whitespace characters are determined by `char::is_whitespace`.
    ///
    /// # Example
    ///
    /// ```rust
    /// assert!(" \t\n".is_whitespace());
    /// assert!("".is_whitespace());
    ///
    /// assert!( !"abc".is_whitespace());
    /// ```
    fn is_whitespace(&self) -> bool;

    /// Returns true if the string contains only alphanumeric code
    /// points.
    ///
    /// Alphanumeric characters are determined by `char::is_alphanumeric`.
    ///
    /// # Example
    ///
    /// ```rust
    /// assert!("Löwe老虎Léopard123".is_alphanumeric());
    /// assert!("".is_alphanumeric());
    ///
    /// assert!( !" &*~".is_alphanumeric());
    /// ```
    fn is_alphanumeric(&self) -> bool;

    /// Returns the number of Unicode code points (`char`) that a
    /// string holds.
    ///
    /// This does not perform any normalization, and is `O(n)`, since
    /// UTF-8 is a variable width encoding of code points.
    ///
    /// *Warning*: The number of code points in a string does not directly
    /// correspond to the number of visible characters or width of the
    /// visible text due to composing characters, and double- and
    /// zero-width ones.
    ///
    /// See also `.len()` for the byte length.
    ///
    /// # Example
    ///
    /// ```rust
    /// // composed forms of `ö` and `é`
    /// let c = "Löwe 老虎 Léopard"; // German, Simplified Chinese, French
    /// // decomposed forms of `ö` and `é`
    /// let d = "Lo\u0308we 老虎 Le\u0301opard";
    ///
    /// assert_eq!(c.char_len(), 15);
    /// assert_eq!(d.char_len(), 17);
    ///
    /// assert_eq!(c.len(), 21);
    /// assert_eq!(d.len(), 23);
    ///
    /// // the two strings *look* the same
    /// println!("{}", c);
    /// println!("{}", d);
    /// ```
    fn char_len(&self) -> uint;

    /// Returns a slice of the given string from the byte range
    /// [`begin`..`end`).
    ///
    /// This operation is `O(1)`.
    ///
    /// Fails when `begin` and `end` do not point to valid characters
    /// or point beyond the last character of the string.
    ///
    /// See also `slice_to` and `slice_from` for slicing prefixes and
    /// suffixes of strings, and `slice_chars` for slicing based on
    /// code point counts.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    /// assert_eq!(s.slice(0, 1), "L");
    ///
    /// assert_eq!(s.slice(1, 9), "öwe 老");
    ///
    /// // these will fail:
    /// // byte 2 lies within `ö`:
    /// // s.slice(2, 3);
    ///
    /// // byte 8 lies within `老`
    /// // s.slice(1, 8);
    ///
    /// // byte 100 is outside the string
    /// // s.slice(3, 100);
    /// ```
    fn slice(&self, begin: uint, end: uint) -> &'a str;

    /// Returns a slice of the string from `begin` to its end.
    ///
    /// Equivalent to `self.slice(begin, self.len())`.
    ///
    /// Fails when `begin` does not point to a valid character, or is
    /// out of bounds.
    ///
    /// See also `slice`, `slice_to` and `slice_chars`.
    fn slice_from(&self, begin: uint) -> &'a str;

    /// Returns a slice of the string from the beginning to byte
    /// `end`.
    ///
    /// Equivalent to `self.slice(0, end)`.
    ///
    /// Fails when `end` does not point to a valid character, or is
    /// out of bounds.
    ///
    /// See also `slice`, `slice_from` and `slice_chars`.
    fn slice_to(&self, end: uint) -> &'a str;

    /// Returns a slice of the string from the character range
    /// [`begin`..`end`).
    ///
    /// That is, start at the `begin`-th code point of the string and
    /// continue to the `end`-th code point. This does not detect or
    /// handle edge cases such as leaving a combining character as the
    /// first code point of the string.
    ///
    /// Due to the design of UTF-8, this operation is `O(end -
    /// begin)`. See `slice`, `slice_to` and `slice_from` for `O(1)`
    /// variants that use byte indices rather than code point
    /// indices.
    ///
    /// Fails if `begin` > `end` or either `begin` or `end` are
    /// beyond the last character of the string.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    /// assert_eq!(s.slice_chars(0, 4), "Löwe");
    /// assert_eq!(s.slice_chars(5, 7), "老虎");
    /// ```
    fn slice_chars(&self, begin: uint, end: uint) -> &'a str;

    /// Returns true if `needle` is a prefix of the string.
    fn starts_with(&self, needle: &str) -> bool;

    /// Returns true if `needle` is a suffix of the string.
    fn ends_with(&self, needle: &str) -> bool;

    /// Escape each char in `s` with `char::escape_default`.
    fn escape_default(&self) -> ~str;

    /// Escape each char in `s` with `char::escape_unicode`.
    fn escape_unicode(&self) -> ~str;

    /// Returns a string with leading and trailing whitespace removed.
    fn trim(&self) -> &'a str;

    /// Returns a string with leading whitespace removed.
    fn trim_left(&self) -> &'a str;

    /// Returns a string with trailing whitespace removed.
    fn trim_right(&self) -> &'a str;

    /// Returns a string with characters that match `to_trim` removed.
    ///
    /// # Arguments
    ///
    /// * to_trim - a character matcher
    ///
    /// # Example
    ///
    /// ```rust
    /// assert_eq!("11foo1bar11".trim_chars(&'1'), "foo1bar")
    /// assert_eq!("12foo1bar12".trim_chars(& &['1', '2']), "foo1bar")
    /// assert_eq!("123foo1bar123".trim_chars(&|c: char| c.is_digit()), "foo1bar")
    /// ```
    fn trim_chars<C: CharEq>(&self, to_trim: &C) -> &'a str;

    /// Returns a string with leading `chars_to_trim` removed.
    ///
    /// # Arguments
    ///
    /// * to_trim - a character matcher
    ///
    /// # Example
    ///
    /// ```rust
    /// assert_eq!("11foo1bar11".trim_left_chars(&'1'), "foo1bar11")
    /// assert_eq!("12foo1bar12".trim_left_chars(& &['1', '2']), "foo1bar12")
    /// assert_eq!("123foo1bar123".trim_left_chars(&|c: char| c.is_digit()), "foo1bar123")
    /// ```
    fn trim_left_chars<C: CharEq>(&self, to_trim: &C) -> &'a str;

    /// Returns a string with trailing `chars_to_trim` removed.
    ///
    /// # Arguments
    ///
    /// * to_trim - a character matcher
    ///
    /// # Example
    ///
    /// ```rust
    /// assert_eq!("11foo1bar11".trim_right_chars(&'1'), "11foo1bar")
    /// assert_eq!("12foo1bar12".trim_right_chars(& &['1', '2']), "12foo1bar")
    /// assert_eq!("123foo1bar123".trim_right_chars(&|c: char| c.is_digit()), "123foo1bar")
    /// ```
    fn trim_right_chars<C: CharEq>(&self, to_trim: &C) -> &'a str;

    /// Replace all occurrences of one string with another.
    ///
    /// # Arguments
    ///
    /// * `from` - The string to replace
    /// * `to` - The replacement string
    ///
    /// # Return value
    ///
    /// The original string with all occurrences of `from` replaced with `to`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = ~"Do you know the muffin man,
    /// The muffin man, the muffin man, ...";
    ///
    /// assert_eq!(s.replace("muffin man", "little lamb"),
    ///            ~"Do you know the little lamb,
    /// The little lamb, the little lamb, ...");
    ///
    /// // not found, so no change.
    /// assert_eq!(s.replace("cookie monster", "little lamb"), s);
    /// ```
    fn replace(&self, from: &str, to: &str) -> ~str;

    /// Copy a slice into a new owned str.
    fn to_owned(&self) -> ~str;

    /// Copy a slice into a new managed str.
    fn to_managed(&self) -> @str;

    /// Converts to a vector of `u16` encoded as UTF-16.
    fn to_utf16(&self) -> ~[u16];

    /// Copy a slice into a new `SendStr`.
    fn to_send_str(&self) -> SendStr;

    /// Check that `index`-th byte lies at the start and/or end of a
    /// UTF-8 code point sequence.
    ///
    /// The start and end of the string (when `index == self.len()`)
    /// are considered to be boundaries.
    ///
    /// Fails if `index` is greater than `self.len()`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    /// assert!(s.is_char_boundary(0));
    /// // start of `老`
    /// assert!(s.is_char_boundary(6));
    /// assert!(s.is_char_boundary(s.len()));
    ///
    /// // second byte of `ö`
    /// assert!(!s.is_char_boundary(2));
    ///
    /// // third byte of `老`
    /// assert!(!s.is_char_boundary(8));
    /// ```
    fn is_char_boundary(&self, index: uint) -> bool;

    /// Pluck a character out of a string and return the index of the next
    /// character.
    ///
    /// This function can be used to iterate over the unicode characters of a
    /// string.
    ///
    /// # Example
    ///
    /// This example manually iterate through the characters of a
    /// string; this should normally be done by `.chars()` or
    /// `.char_indices`.
    ///
    /// ```rust
    /// use std::str::CharRange;
    ///
    /// let s = "中华Việt Nam";
    /// let mut i = 0u;
    /// while i < s.len() {
    ///     let CharRange {ch, next} = s.char_range_at(i);
    ///     println!("{}: {}", i, ch);
    ///     i = next;
    /// }
    /// ```
    ///
    /// ## Output
    ///
    /// ```
    /// 0: 中
    /// 3: 华
    /// 6: V
    /// 7: i
    /// 8: ệ
    /// 11: t
    /// 12:
    /// 13: N
    /// 14: a
    /// 15: m
    /// ```
    ///
    /// # Arguments
    ///
    /// * s - The string
    /// * i - The byte offset of the char to extract
    ///
    /// # Return value
    ///
    /// A record {ch: char, next: uint} containing the char value and the byte
    /// index of the next unicode character.
    ///
    /// # Failure
    ///
    /// If `i` is greater than or equal to the length of the string.
    /// If `i` is not the index of the beginning of a valid UTF-8 character.
    fn char_range_at(&self, start: uint) -> CharRange;

    /// Given a byte position and a str, return the previous char and its position.
    ///
    /// This function can be used to iterate over a unicode string in reverse.
    ///
    /// Returns 0 for next index if called on start index 0.
    fn char_range_at_reverse(&self, start: uint) -> CharRange;

    /// Plucks the character starting at the `i`th byte of a string
    fn char_at(&self, i: uint) -> char;

    /// Plucks the character ending at the `i`th byte of a string
    fn char_at_reverse(&self, i: uint) -> char;

    /// Work with the byte buffer of a string as a byte slice.
    fn as_bytes(&self) -> &'a [u8];

    /// Returns the byte index of the first character of `self` that
    /// matches `search`.
    ///
    /// # Return value
    ///
    /// `Some` containing the byte index of the first matching character
    /// or `None` if there is no match
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    ///
    /// assert_eq!(s.find('L'), Some(0));
    /// assert_eq!(s.find('é'), Some(14));
    ///
    /// // the first space
    /// assert_eq!(s.find(|c: char| c.is_whitespace()), Some(5));
    ///
    /// // neither are found
    /// assert_eq!(s.find(&['1', '2']), None);
    /// ```
    fn find<C: CharEq>(&self, search: C) -> Option<uint>;

    /// Returns the byte index of the last character of `self` that
    /// matches `search`.
    ///
    /// # Return value
    ///
    /// `Some` containing the byte index of the last matching character
    /// or `None` if there is no match.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    ///
    /// assert_eq!(s.rfind('L'), Some(13));
    /// assert_eq!(s.rfind('é'), Some(14));
    ///
    /// // the second space
    /// assert_eq!(s.rfind(|c: char| c.is_whitespace()), Some(12));
    ///
    /// // searches for an occurrence of either `1` or `2`, but neither are found
    /// assert_eq!(s.rfind(&['1', '2']), None);
    /// ```
    fn rfind<C: CharEq>(&self, search: C) -> Option<uint>;

    /// Returns the byte index of the first matching substring
    ///
    /// # Arguments
    ///
    /// * `needle` - The string to search for
    ///
    /// # Return value
    ///
    /// `Some` containing the byte index of the first matching substring
    /// or `None` if there is no match.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    ///
    /// assert_eq!(s.find_str("老虎 L"), Some(6));
    /// assert_eq!(s.find_str("muffin man"), None);
    /// ```
    fn find_str(&self, &str) -> Option<uint>;

    /// Given a string, make a new string with repeated copies of it.
    fn repeat(&self, nn: uint) -> ~str;

    /// Retrieves the first character from a string slice and returns
    /// it. This does not allocate a new string; instead, it returns a
    /// slice that points one character beyond the character that was
    /// shifted.
    ///
    /// # Failure
    ///
    /// If the string does not contain any characters.
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = "Löwe 老虎 Léopard";
    /// let (c, s1) = s.slice_shift_char();
    /// assert_eq!(c, 'L');
    /// assert_eq!(s1, "öwe 老虎 Léopard");
    ///
    /// let (c, s2) = s1.slice_shift_char();
    /// assert_eq!(c, 'ö');
    /// assert_eq!(s2, "we 老虎 Léopard");
    /// ```
    fn slice_shift_char(&self) -> (char, &'a str);

    /// Levenshtein Distance between two strings.
    fn lev_distance(&self, t: &str) -> uint;

    /// Returns the byte offset of an inner slice relative to an enclosing outer slice.
    ///
    /// Fails if `inner` is not a direct slice contained within self.
    ///
    /// # Example
    ///
    /// ```rust
    /// let string = "a\nb\nc";
    /// let lines: ~[&str] = string.lines().collect();
    ///
    /// assert!(string.subslice_offset(lines[0]) == 0); // &"a"
    /// assert!(string.subslice_offset(lines[1]) == 2); // &"b"
    /// assert!(string.subslice_offset(lines[2]) == 4); // &"c"
    /// ```
    fn subslice_offset(&self, inner: &str) -> uint;

    /// Return an unsafe pointer to the strings buffer.
    ///
    /// The caller must ensure that the string outlives this pointer,
    /// and that it is not reallocated (e.g. by pushing to the
    /// string).
fn as_ptr(&self) -> *u8; }

// `StrSlice` implementation for borrowed string slices. Most methods here
// implement the trait declared above and inherit its documentation; comments
// below are limited to non-obvious implementation details.
impl<'a> StrSlice<'a> for &'a str {
    #[inline]
    fn contains<'a>(&self, needle: &'a str) -> bool {
        self.find_str(needle).is_some()
    }

    #[inline]
    fn contains_char(&self, needle: char) -> bool {
        self.find(needle).is_some()
    }

    #[inline]
    fn chars(&self) -> Chars<'a> {
        Chars{string: *self}
    }

    #[inline]
    fn chars_rev(&self) -> RevChars<'a> {
        self.chars().rev()
    }

    #[inline]
    fn bytes(&self) -> Bytes<'a> {
        self.as_bytes().iter().map(|&b| b)
    }

    #[inline]
    fn bytes_rev(&self) -> RevBytes<'a> {
        self.bytes().rev()
    }

    #[inline]
    fn char_indices(&self) -> CharOffsets<'a> {
        CharOffsets{string: *self, iter: self.chars()}
    }

    #[inline]
    fn char_indices_rev(&self) -> RevCharOffsets<'a> {
        self.char_indices().rev()
    }

    #[inline]
    fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<'a, Sep> {
        CharSplits {
            string: *self,
            only_ascii: sep.only_ascii(),
            sep: sep,
            allow_trailing_empty: true,
            finished: false,
        }
    }

    #[inline]
    fn splitn<Sep: CharEq>(&self, sep: Sep, count: uint) -> CharSplitsN<'a, Sep> {
        CharSplitsN {
            iter: self.split(sep),
            count: count,
            invert: false,
        }
    }

    #[inline]
    fn split_terminator<Sep: CharEq>(&self, sep: Sep) -> CharSplits<'a, Sep> {
        // Same as `split`, except a trailing empty piece is suppressed.
        CharSplits {
            allow_trailing_empty: false,
            ..self.split(sep)
        }
    }

    #[inline]
    fn rsplit<Sep: CharEq>(&self, sep: Sep) -> RevCharSplits<'a, Sep> {
        self.split(sep).rev()
    }

    #[inline]
    fn rsplitn<Sep: CharEq>(&self, sep: Sep, count: uint) -> CharSplitsN<'a, Sep> {
        CharSplitsN {
            iter: self.split(sep),
            count: count,
            invert: true,
        }
    }

    #[inline]
    fn match_indices(&self, sep: &'a str) -> MatchIndices<'a> {
        // An empty needle would match at every position; forbid it here.
        assert!(!sep.is_empty())
        MatchIndices {
            haystack: *self,
            needle: sep,
            position: 0
        }
    }

    #[inline]
    fn split_str(&self, sep: &'a str) -> StrSplits<'a> {
        StrSplits {
            it: self.match_indices(sep),
            last_end: 0,
            finished: false
        }
    }

    #[inline]
    fn lines(&self) -> CharSplits<'a, char> {
        self.split_terminator('\n')
    }

    fn lines_any(&self) -> AnyLines<'a> {
        self.lines().map(|line| {
            // Trim a trailing '\r' so both "\n" and "\r\n" terminators work.
            let l = line.len();
            if l > 0 && line[l - 1] == '\r' as u8 { line.slice(0, l - 1) }
            else { line }
        })
    }

    #[inline]
    fn words(&self) -> Words<'a> {
        self.split(char::is_whitespace).filter(|s| !s.is_empty())
    }

    #[inline]
    fn nfd_chars(&self) -> Normalizations<'a> {
        Normalizations {
            iter: self.chars(),
            buffer: ~[],
            sorted: false,
            kind: NFD
        }
    }

    #[inline]
    fn nfkd_chars(&self) -> Normalizations<'a> {
        Normalizations {
            iter: self.chars(),
            buffer: ~[],
            sorted: false,
            kind: NFKD
        }
    }

    #[inline]
    fn is_whitespace(&self) -> bool { self.chars().all(char::is_whitespace) }

    #[inline]
    fn is_alphanumeric(&self) -> bool { self.chars().all(char::is_alphanumeric) }

    #[inline]
    fn char_len(&self) -> uint { self.chars().len() }

    #[inline]
    fn slice(&self, begin: uint, end: uint) -> &'a str {
        assert!(self.is_char_boundary(begin) && self.is_char_boundary(end));
        unsafe { raw::slice_bytes(*self, begin, end) }
    }

    #[inline]
    fn slice_from(&self, begin: uint) -> &'a str {
        self.slice(begin, self.len())
    }

    #[inline]
    fn slice_to(&self, end: uint) -> &'a str {
        assert!(self.is_char_boundary(end));
        unsafe { raw::slice_bytes(*self, 0, end) }
    }

    fn slice_chars(&self, begin: uint, end: uint) -> &'a str {
        assert!(begin <= end);
        let mut count = 0;
        let mut begin_byte = None;
        let mut end_byte = None;

        // This could be even more efficient by not decoding,
        // only finding the char boundaries
        for (idx, _) in self.char_indices() {
            if count == begin { begin_byte = Some(idx); }
            if count == end { end_byte = Some(idx); break; }
            count += 1;
        }
        // A bound equal to char_len falls off the end of the loop above;
        // map it to the byte length.
        if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
        if end_byte.is_none() && count == end { end_byte = Some(self.len()) }

        match (begin_byte, end_byte) {
            (None, _) => fail!("slice_chars: `begin` is beyond end of string"),
            (_, None) => fail!("slice_chars: `end` is beyond end of string"),
            (Some(a), Some(b)) => unsafe { raw::slice_bytes(*self, a, b) }
        }
    }

    #[inline]
    fn starts_with<'a>(&self, needle: &'a str) -> bool {
        // Bytewise comparison is valid because both sides are UTF-8.
        let n = needle.len();
        self.len() >= n && needle.as_bytes() == self.as_bytes().slice_to(n)
    }

    #[inline]
    fn ends_with(&self, needle: &str) -> bool {
        let (m, n) = (self.len(), needle.len());
        m >= n && needle.as_bytes() == self.as_bytes().slice_from(m - n)
    }

    fn escape_default(&self) -> ~str {
        let mut out: ~str = ~"";
        out.reserve_at_least(self.len());
        for c in self.chars() {
            c.escape_default(|c| out.push_char(c));
        }
        out
    }

    fn escape_unicode(&self) -> ~str {
        let mut out: ~str = ~"";
        out.reserve_at_least(self.len());
        for c in self.chars() {
            c.escape_unicode(|c| out.push_char(c));
        }
        out
    }

    #[inline]
    fn trim(&self) -> &'a str {
        self.trim_left().trim_right()
    }

    #[inline]
    fn trim_left(&self) -> &'a str {
        self.trim_left_chars(&char::is_whitespace)
    }

    #[inline]
    fn trim_right(&self) -> &'a str {
        self.trim_right_chars(&char::is_whitespace)
    }

    #[inline]
    fn trim_chars<C: CharEq>(&self, to_trim: &C) -> &'a str {
        self.trim_left_chars(to_trim).trim_right_chars(to_trim)
    }

    #[inline]
    fn trim_left_chars<C: CharEq>(&self, to_trim: &C) -> &'a str {
        // The first non-matching char starts the retained suffix.
        match self.find(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(first) => unsafe { raw::slice_bytes(*self, first, self.len()) }
        }
    }

    #[inline]
    fn trim_right_chars<C: CharEq>(&self, to_trim: &C) -> &'a str {
        // The last non-matching char (kept whole) ends the retained prefix.
        match self.rfind(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(last) => {
                let next = self.char_range_at(last).next;
                unsafe { raw::slice_bytes(*self, 0u, next) }
            }
        }
    }

    fn replace(&self, from: &str, to: &str) -> ~str {
        let mut result = ~"";
        let mut last_end = 0;
        for (start, end) in self.match_indices(from) {
            result.push_str(unsafe{raw::slice_bytes(*self, last_end, start)});
            result.push_str(to);
            last_end = end;
        }
        result.push_str(unsafe{raw::slice_bytes(*self, last_end, self.len())});
        result
    }

    #[inline]
    fn to_owned(&self) -> ~str {
        let len = self.len();
        unsafe {
            // Copy the bytes into a fresh owned vector, then reinterpret it
            // as ~str (valid because the source is already UTF-8).
            let mut v = vec::with_capacity(len);
            ptr::copy_memory(v.as_mut_ptr(), self.as_ptr(), len);
            v.set_len(len);
            ::cast::transmute(v)
        }
    }

    #[inline]
    fn to_managed(&self) -> @str {
        unsafe {
            let v: *&[u8] = cast::transmute(self);
            cast::transmute(at_vec::to_managed(*v))
        }
    }

    fn to_utf16(&self) -> ~[u16] {
        let mut u = ~[];
        for ch in self.chars() {
            // Arithmetic with u32 literals is easier on the eyes than chars.
            let mut ch = ch as u32;

            if (ch & 0xFFFF_u32) == ch {
                // The BMP falls through (assuming non-surrogate, as it
                // should)
                assert!(ch <= 0xD7FF_u32 || ch >= 0xE000_u32);
                u.push(ch as u16)
            } else {
                // Supplementary planes break into surrogates.
                assert!(ch >= 0x1_0000_u32 && ch <= 0x10_FFFF_u32);
                ch -= 0x1_0000_u32;
                let w1 = 0xD800_u16 | ((ch >> 10) as u16);
                let w2 = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16);
                u.push_all([w1, w2])
            }
        }
        u
    }

    #[inline]
    fn to_send_str(&self) -> SendStr {
        SendStrOwned(self.to_owned())
    }

    #[inline]
    fn is_char_boundary(&self, index: uint) -> bool {
        if index == self.len() { return true; }
        // UTF-8 continuation bytes are 10xxxxxx (128..191); anything else
        // (ASCII or a leading byte) starts a character.
        let b = self[index];
        return b < 128u8 || b >= 192u8;
    }

    #[inline]
    fn char_range_at(&self, i: uint) -> CharRange {
        if self[i] < 128u8 {
            return CharRange {ch: self[i] as char, next: i + 1 };
        }

        // Multibyte case is a fn to allow char_range_at to inline cleanly
        fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
            let mut val = s[i] as uint;
            let w = UTF8_CHAR_WIDTH[val] as uint;
            assert!((w != 0));

            val = utf8_first_byte!(val, w);
            val = utf8_acc_cont_byte!(val, s[i + 1]);
            if w > 2 { val = utf8_acc_cont_byte!(val, s[i + 2]); }
            if w > 3 { val = utf8_acc_cont_byte!(val, s[i + 3]); }

            return CharRange {ch: unsafe { transmute(val as u32) }, next: i + w};
        }

        return multibyte_char_range_at(*self, i);
    }

    #[inline]
    fn char_at(&self, i: uint) -> char {
        self.char_range_at(i).ch
    }

    #[inline]
    fn char_range_at_reverse(&self, start: uint) -> CharRange {
        let mut prev = start;

        prev = prev.saturating_sub(1);
        if self[prev] < 128 { return CharRange{ch: self[prev] as char, next: prev} }

        // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
        fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
            // while there is a previous byte == 10......
            while i > 0 && s[i] & 192u8 == TAG_CONT_U8 {
                i -= 1u;
            }

            let mut val = s[i] as uint;
            let w = UTF8_CHAR_WIDTH[val] as uint;
            assert!((w != 0));

            val = utf8_first_byte!(val, w);
            val = utf8_acc_cont_byte!(val, s[i + 1]);
            if w > 2 { val = utf8_acc_cont_byte!(val, s[i + 2]); }
            if w > 3 { val = utf8_acc_cont_byte!(val, s[i + 3]); }

            return CharRange {ch: unsafe { transmute(val as u32) }, next: i};
        }

        return multibyte_char_range_at_reverse(*self, prev);
    }

    // FIX(review): the original text contained a second, identical
    // definition of `char_at` at this point; it was removed because an item
    // may only be defined once per impl. `char_at` remains defined once,
    // directly after `char_range_at` above.

    #[inline]
    fn char_at_reverse(&self, i: uint) -> char {
        self.char_range_at_reverse(i).ch
    }

    #[inline]
    fn as_bytes(&self) -> &'a [u8] {
        // &str and &[u8] share representation; the bytes are valid UTF-8.
        unsafe { cast::transmute(*self) }
    }

    fn find<C: CharEq>(&self, search: C) -> Option<uint> {
        if search.only_ascii() {
            // ASCII-only needles can be matched bytewise.
            self.bytes().position(|b| search.matches(b as char))
        } else {
            for (index, c) in self.char_indices() {
                if search.matches(c) { return Some(index); }
            }
            None
        }
    }

    fn rfind<C: CharEq>(&self, search: C) -> Option<uint> {
        if search.only_ascii() {
            self.bytes().rposition(|b| search.matches(b as char))
        } else {
            for (index, c) in self.char_indices_rev() {
                if search.matches(c) { return Some(index); }
            }
            None
        }
    }

    fn find_str(&self, needle: &str) -> Option<uint> {
        if needle.is_empty() {
            // The empty needle trivially matches at offset zero
            // (`match_indices` forbids empty needles).
            Some(0)
        } else {
            self.match_indices(needle)
                .next()
                .map(|(start, _end)| start)
        }
    }

    fn repeat(&self, nn: uint) -> ~str {
        let mut ret = with_capacity(nn * self.len());
        for _ in range(0, nn) {
            ret.push_str(*self);
        }
        ret
    }

    #[inline]
    fn slice_shift_char(&self) -> (char, &'a str) {
        let CharRange {ch, next} = self.char_range_at(0u);
        let next_s = unsafe { raw::slice_bytes(*self, next, self.len()) };
        return (ch, next_s);
    }

    fn lev_distance(&self, t: &str) -> uint {
        // Single-row dynamic-programming formulation of edit distance.
        let slen = self.len();
        let tlen = t.len();

        if slen == 0 { return tlen; }
        if tlen == 0 { return slen; }

        let mut dcol = vec::from_fn(tlen + 1, |x| x);

        for (i, sc) in self.chars().enumerate() {
            let mut current = i;
            dcol[0] = current + 1;

            for (j, tc) in t.chars().enumerate() {
                let next = dcol[j + 1];

                if sc == tc {
                    dcol[j + 1] = current;
                } else {
                    // min of substitution (current), deletion (next) and
                    // insertion (dcol[j]) costs, plus one.
                    dcol[j + 1] = ::cmp::min(current, next);
                    dcol[j + 1] = ::cmp::min(dcol[j + 1], dcol[j]) + 1;
                }

                current = next;
            }
        }

        return dcol[tlen];
    }

    fn subslice_offset(&self, inner: &str) -> uint {
        // Compare raw buffer addresses; `inner` must lie within `self`.
        let a_start = self.as_ptr() as uint;
        let a_end = a_start + self.len();
        let b_start = inner.as_ptr() as uint;
        let b_end = b_start + inner.len();

        assert!(a_start <= b_start);
        assert!(b_end <= a_end);
        b_start - a_start
    }

    #[inline]
    fn as_ptr(&self) -> *u8 {
        self.repr().data
    }
}

/// Methods for owned strings
pub trait OwnedStr {
    /// Appends a string slice to the back of a string, without overallocating.
    fn push_str_no_overallocate(&mut self, rhs: &str);

    /// Appends a string slice to the back of a string
    fn push_str(&mut self, rhs: &str);

    /// Appends a character to the back of a string
    fn push_char(&mut self, c: char);

    /// Remove the final character from a string and return it
    ///
    /// # Failure
    ///
    /// If the string does not contain any characters
    fn pop_char(&mut self) -> char;

    /// Remove the first character from a string and return it
    ///
    /// # Failure
    ///
    /// If the string does not contain any characters
    fn shift_char(&mut self) -> char;

    /// Prepend a char to a string
    fn unshift_char(&mut self, ch: char);

    /// Insert a new sub-string at the given position in a string, in O(n + m) time
    /// (with n and m the lengths of the string and the substring.)
    /// This fails if `position` is not at a character boundary.
    fn insert(&mut self, position: uint, substring: &str);

    /// Insert a char at the given position in a string, in O(n + m) time
    /// (with n and m the lengths of the string and the substring.)
    /// This fails if `position` is not at a character boundary.
    fn insert_char(&mut self, position: uint, ch: char);

    /// Concatenate two strings together.
    fn append(self, rhs: &str) -> ~str;

    /// Reserves capacity for exactly `n` bytes in the given string.
///
    /// Assuming single-byte characters, the resulting string will be large
    /// enough to hold a string of length `n`.
    ///
    /// If the capacity for `s` is already equal to or greater than the requested
    /// capacity, then no action is taken.
    ///
    /// # Arguments
    ///
    /// * s - A string
    /// * n - The number of bytes to reserve space for
    fn reserve(&mut self, n: uint);

    /// Reserves capacity for at least `n` bytes in the given string.
    ///
    /// Assuming single-byte characters, the resulting string will be large
    /// enough to hold a string of length `n`.
    ///
    /// This function will over-allocate in order to amortize the allocation costs
    /// in scenarios where the caller may need to repeatedly reserve additional
    /// space.
    ///
    /// If the capacity for `s` is already equal to or greater than the requested
    /// capacity, then no action is taken.
    ///
    /// # Arguments
    ///
    /// * s - A string
    /// * n - The number of bytes to reserve space for
    fn reserve_at_least(&mut self, n: uint);

    /// Returns the number of single-byte characters the string can hold without
    /// reallocating
    fn capacity(&self) -> uint;

    /// Shorten a string to the specified length (which must be <= the current length)
    fn truncate(&mut self, len: uint);

    /// Consumes the string, returning the underlying byte buffer.
    ///
    /// The buffer does not have a null terminator.
    fn into_bytes(self) -> ~[u8];

    /// Sets the length of a string
    ///
    /// This will explicitly set the size of the string, without actually
    /// modifying its buffers, so it is up to the caller to ensure that
    /// the string is actually the specified size.
    unsafe fn set_len(&mut self, new_len: uint);
}

// `OwnedStr` implementation for unique (`~`) strings. See the trait
// declaration for the per-method contracts; comments below cover only
// implementation details.
impl OwnedStr for ~str {
    #[inline]
    fn push_str_no_overallocate(&mut self, rhs: &str) {
        // Reserve exactly the final size, then delegate the copy.
        let new_cap = self.len() + rhs.len();
        self.reserve(new_cap);
        self.push_str(rhs);
    }

    #[inline]
    fn push_str(&mut self, rhs: &str) {
        unsafe {
            raw::push_bytes(self, rhs.as_bytes());
        }
    }

    #[inline]
    fn push_char(&mut self, c: char) {
        let cur_len = self.len();
        // may use up to 4 bytes.
        unsafe {
            let v = raw::as_owned_vec(self);
            v.reserve_additional(4);

            // Attempt to not use an intermediate buffer by just pushing bytes
            // directly onto this string.
            let write_ptr = v.as_mut_ptr().offset(cur_len as int);
            let used = vec::raw::mut_buf_as_slice(write_ptr, 4, |slc| c.encode_utf8(slc));

            v.set_len(cur_len + used);
        }
    }

    #[inline]
    fn pop_char(&mut self) -> char {
        let end = self.len();
        // Fails on an empty string, as documented on the trait.
        assert!(end > 0u);
        let CharRange {ch, next} = self.char_range_at_reverse(end);
        // Dropping the length to the last char boundary removes the char.
        unsafe { self.set_len(next); }
        return ch;
    }

    #[inline]
    fn shift_char(&mut self) -> char {
        let CharRange {ch, next} = self.char_range_at(0u);
        // NOTE(review): copies the entire tail into a new allocation; O(n).
        *self = self.slice(next, self.len()).to_owned();
        return ch;
    }

    #[inline]
    fn unshift_char(&mut self, ch: char) {
        // This could be more efficient.
        let mut new_str = ~"";
        new_str.push_char(ch);
        new_str.push_str(*self);
        *self = new_str;
    }

    #[inline]
    fn insert(&mut self, position: uint, substring: &str) {
        // This could be more efficient.
        let mut new_str = self.slice_to(position).to_owned();
        new_str.push_str(substring);
        new_str.push_str(self.slice_from(position));
        *self = new_str;
    }

    #[inline]
    fn insert_char(&mut self, position: uint, ch: char) {
        // This could be more efficient.
        let mut new_str = self.slice_to(position).to_owned();
        new_str.push_char(ch);
        new_str.push_str(self.slice_from(position));
        *self = new_str;
    }

    #[inline]
    fn append(self, rhs: &str) -> ~str {
        let mut new_str = self;
        new_str.push_str_no_overallocate(rhs);
        new_str
    }

    #[inline]
    fn reserve(&mut self, n: uint) {
        unsafe {
            raw::as_owned_vec(self).reserve(n)
        }
    }

    #[inline]
    fn reserve_at_least(&mut self, n: uint) {
        // Round up to a power of two to amortize repeated reservations.
        self.reserve(uint::next_power_of_two_opt(n).unwrap_or(n))
    }

    #[inline]
    fn capacity(&self) -> uint {
        unsafe {
            // NOTE(review): assumes ~str and ~[u8] share representation —
            // the same assumption `into_bytes` makes below.
            let buf: &~[u8] = cast::transmute(self);
            buf.capacity()
        }
    }

    #[inline]
    fn truncate(&mut self, len: uint) {
        assert!(len <= self.len());
        // Truncating in the middle of a char would leave invalid UTF-8.
        assert!(self.is_char_boundary(len));
        unsafe { self.set_len(len); }
    }

    #[inline]
    fn into_bytes(self) -> ~[u8] {
        unsafe { cast::transmute(self) }
    }

    #[inline]
    unsafe fn set_len(&mut self, new_len: uint) {
        raw::as_owned_vec(self).set_len(new_len)
    }
}

impl Clone for ~str {
    #[inline]
    fn clone(&self) -> ~str {
        // Deep copy: allocates a fresh owned buffer.
        self.to_owned()
    }
}

impl DeepClone for ~str {
    #[inline]
    fn deep_clone(&self) -> ~str {
        self.to_owned()
    }
}

impl Clone for @str {
    #[inline]
    // Copying the managed pointer itself is all that is needed here.
    fn clone(&self) -> @str { *self }
}

impl DeepClone for @str {
    #[inline]
    fn deep_clone(&self) -> @str { *self }
}

impl FromIterator<char> for ~str {
    #[inline]
    fn from_iterator<T: Iterator<char>>(iterator: &mut T) -> ~str {
        // The size hint's lower bound pre-sizes the buffer.
        let (lower, _) = iterator.size_hint();
        let mut buf = with_capacity(lower);
        buf.extend(iterator);
        buf
    }
}

impl Extendable<char> for ~str {
    #[inline]
    fn extend<T: Iterator<char>>(&mut self, iterator: &mut T) {
        let (lower, _) = iterator.size_hint();
        let reserve = lower + self.len();
        self.reserve_at_least(reserve);
        for ch in *iterator {
            self.push_char(ch)
        }
    }
}

// This works because every lifetime is a sub-lifetime of 'static
impl<'a> Default for &'a str {
    fn default() -> &'a str { "" }
}

impl Default for ~str {
    fn default() -> ~str { ~"" }
}

impl Default for @str {
    fn default() -> @str { @"" }
}

#[cfg(test)]
mod tests {
    use iter::AdditiveIterator;
    use prelude::*;
    use ptr;
    use
str::*; use send_str::{SendStrOwned, SendStrStatic}; #[test] fn test_eq() { assert!((eq(&~"", &~""))); assert!((eq(&~"foo", &~"foo"))); assert!((!eq(&~"foo", &~"bar"))); } #[test] fn test_eq_slice() { assert!((eq_slice("foobar".slice(0, 3), "foo"))); assert!((eq_slice("barfoo".slice(3, 6), "foo"))); assert!((!eq_slice("foo1", "foo2"))); } #[test] fn test_le() { assert!("" <= ""); assert!("" <= "foo"); assert!("foo" <= "foo"); assert!("foo" != "bar"); } #[test] fn test_len() { assert_eq!("".len(), 0u); assert_eq!("hello world".len(), 11u); assert_eq!("\x63".len(), 1u); assert_eq!("\xa2".len(), 2u); assert_eq!("\u03c0".len(), 2u); assert_eq!("\u2620".len(), 3u); assert_eq!("\U0001d11e".len(), 4u); assert_eq!("".char_len(), 0u); assert_eq!("hello world".char_len(), 11u); assert_eq!("\x63".char_len(), 1u); assert_eq!("\xa2".char_len(), 1u); assert_eq!("\u03c0".char_len(), 1u); assert_eq!("\u2620".char_len(), 1u); assert_eq!("\U0001d11e".char_len(), 1u); assert_eq!("ประเทศไทย中华Việt Nam".char_len(), 19u); } #[test] fn test_find() { assert_eq!("hello".find('l'), Some(2u)); assert_eq!("hello".find(|c:char| c == 'o'), Some(4u)); assert!("hello".find('x').is_none()); assert!("hello".find(|c:char| c == 'x').is_none()); assert_eq!("ประเทศไทย中华Việt Nam".find('华'), Some(30u)); assert_eq!("ประเทศไทย中华Việt Nam".find(|c: char| c == '华'), Some(30u)); } #[test] fn test_rfind() { assert_eq!("hello".rfind('l'), Some(3u)); assert_eq!("hello".rfind(|c:char| c == 'o'), Some(4u)); assert!("hello".rfind('x').is_none()); assert!("hello".rfind(|c:char| c == 'x').is_none()); assert_eq!("ประเทศไทย中华Việt Nam".rfind('华'), Some(30u)); assert_eq!("ประเทศไทย中华Việt Nam".rfind(|c: char| c == '华'), Some(30u)); } #[test] fn test_push_str() { let mut s = ~""; s.push_str(""); assert_eq!(s.slice_from(0), ""); s.push_str("abc"); assert_eq!(s.slice_from(0), "abc"); s.push_str("ประเทศไทย中华Việt Nam"); assert_eq!(s.slice_from(0), "abcประเทศไทย中华Việt Nam"); } #[test] fn test_append() { let mut s = ~""; s = 
s.append(""); assert_eq!(s.slice_from(0), ""); s = s.append("abc"); assert_eq!(s.slice_from(0), "abc"); s = s.append("ประเทศไทย中华Việt Nam"); assert_eq!(s.slice_from(0), "abcประเทศไทย中华Việt Nam"); } #[test] fn test_pop_char() { let mut data = ~"ประเทศไทย中华"; let cc = data.pop_char(); assert_eq!(~"ประเทศไทย中", data); assert_eq!('华', cc); } #[test] fn test_pop_char_2() { let mut data2 = ~"华"; let cc2 = data2.pop_char(); assert_eq!(~"", data2); assert_eq!('华', cc2); } #[test] #[should_fail] fn test_pop_char_fail() { let mut data = ~""; let _cc3 = data.pop_char(); } #[test] fn test_push_char() { let mut data = ~"ประเทศไทย中"; data.push_char('华'); data.push_char('b'); // 1 byte data.push_char('¢'); // 2 byte data.push_char('€'); // 3 byte data.push_char('𤭢'); // 4 byte assert_eq!(~"ประเทศไทย中华b¢€𤭢", data); } #[test] fn test_shift_char() { let mut data = ~"ประเทศไทย中"; let cc = data.shift_char(); assert_eq!(~"ระเทศไทย中", data); assert_eq!('ป', cc); } #[test] fn test_unshift_char() { let mut data = ~"ประเทศไทย中"; data.unshift_char('华'); assert_eq!(~"华ประเทศไทย中", data); } #[test] fn test_insert_char() { let mut data = ~"ประเทศไทย中"; data.insert_char(15, '华'); assert_eq!(~"ประเท华ศไทย中", data); } #[test] fn test_insert() { let mut data = ~"ประเทศไทย中"; data.insert(15, "华中"); assert_eq!(~"ประเท华中ศไทย中", data); } #[test] fn test_collect() { let empty = ~""; let s: ~str = empty.chars().collect(); assert_eq!(empty, s); let data = ~"ประเทศไทย中"; let s: ~str = data.chars().collect(); assert_eq!(data, s); } #[test] fn test_extend() { let data = ~"ประเทศไทย中"; let mut cpy = data.clone(); let other = "abc"; let mut it = other.chars(); cpy.extend(&mut it); assert_eq!(cpy, data + other); } #[test] fn test_clear() { let mut empty = ~""; empty.clear(); assert_eq!("", empty.as_slice()); let mut data = ~"ประเทศไทย中"; data.clear(); assert_eq!("", data.as_slice()); data.push_char('华'); assert_eq!("华", data.as_slice()); } #[test] fn test_into_bytes() { let data = ~"asdf"; let buf = 
data.into_bytes(); assert_eq!(bytes!("asdf"), buf.as_slice()); } #[test] fn test_find_str() { // byte positions assert_eq!("".find_str(""), Some(0u)); assert!("banana".find_str("apple pie").is_none()); let data = "abcabc"; assert_eq!(data.slice(0u, 6u).find_str("ab"), Some(0u)); assert_eq!(data.slice(2u, 6u).find_str("ab"), Some(3u - 2u)); assert!(data.slice(2u, 4u).find_str("ab").is_none()); let mut data = ~"ประเทศไทย中华Việt Nam"; data = data + data; assert!(data.find_str("ไท华").is_none()); assert_eq!(data.slice(0u, 43u).find_str(""), Some(0u)); assert_eq!(data.slice(6u, 43u).find_str(""), Some(6u - 6u)); assert_eq!(data.slice(0u, 43u).find_str("ประ"), Some( 0u)); assert_eq!(data.slice(0u, 43u).find_str("ทศไ"), Some(12u)); assert_eq!(data.slice(0u, 43u).find_str("ย中"), Some(24u)); assert_eq!(data.slice(0u, 43u).find_str("iệt"), Some(34u)); assert_eq!(data.slice(0u, 43u).find_str("Nam"), Some(40u)); assert_eq!(data.slice(43u, 86u).find_str("ประ"), Some(43u - 43u)); assert_eq!(data.slice(43u, 86u).find_str("ทศไ"), Some(55u - 43u)); assert_eq!(data.slice(43u, 86u).find_str("ย中"), Some(67u - 43u)); assert_eq!(data.slice(43u, 86u).find_str("iệt"), Some(77u - 43u)); assert_eq!(data.slice(43u, 86u).find_str("Nam"), Some(83u - 43u)); } #[test] fn test_slice_chars() { fn t(a: &str, b: &str, start: uint) { assert_eq!(a.slice_chars(start, start + b.char_len()), b); } t("", "", 0); t("hello", "llo", 2); t("hello", "el", 1); t("αβλ", "β", 1); t("αβλ", "", 3); assert_eq!("ะเทศไท", "ประเทศไทย中华Việt Nam".slice_chars(2, 8)); } #[test] fn test_concat() { fn t(v: &[~str], s: &str) { assert_eq!(v.concat(), s.to_str()); } t([~"you", ~"know", ~"I'm", ~"no", ~"good"], "youknowI'mnogood"); let v: &[~str] = []; t(v, ""); t([~"hi"], "hi"); } #[test] fn test_connect() { fn t(v: &[~str], sep: &str, s: &str) { assert_eq!(v.connect(sep), s.to_str()); } t([~"you", ~"know", ~"I'm", ~"no", ~"good"], " ", "you know I'm no good"); let v: &[~str] = []; t(v, " ", ""); t([~"hi"], " ", "hi"); } #[test] 
fn test_concat_slices() { fn t(v: &[&str], s: &str) { assert_eq!(v.concat(), s.to_str()); } t(["you", "know", "I'm", "no", "good"], "youknowI'mnogood"); let v: &[&str] = []; t(v, ""); t(["hi"], "hi"); } #[test] fn test_connect_slices() { fn t(v: &[&str], sep: &str, s: &str) { assert_eq!(v.connect(sep), s.to_str()); } t(["you", "know", "I'm", "no", "good"], " ", "you know I'm no good"); t([], " ", ""); t(["hi"], " ", "hi"); } #[test] fn test_repeat() { assert_eq!("x".repeat(4), ~"xxxx"); assert_eq!("hi".repeat(4), ~"hihihihi"); assert_eq!("ไท华".repeat(3), ~"ไท华ไท华ไท华"); assert_eq!("".repeat(4), ~""); assert_eq!("hi".repeat(0), ~""); } #[test] fn test_unsafe_slice() { assert_eq!("ab", unsafe {raw::slice_bytes("abc", 0, 2)}); assert_eq!("bc", unsafe {raw::slice_bytes("abc", 1, 3)}); assert_eq!("", unsafe {raw::slice_bytes("abc", 1, 1)}); fn a_million_letter_a() -> ~str { let mut i = 0; let mut rs = ~""; while i < 100000 { rs.push_str("aaaaaaaaaa"); i += 1; } rs } fn half_a_million_letter_a() -> ~str { let mut i = 0; let mut rs = ~""; while i < 100000 { rs.push_str("aaaaa"); i += 1; } rs } let letters = a_million_letter_a(); assert!(half_a_million_letter_a() == unsafe {raw::slice_bytes(letters, 0u, 500000)}.to_owned()); } #[test] fn test_starts_with() { assert!(("".starts_with(""))); assert!(("abc".starts_with(""))); assert!(("abc".starts_with("a"))); assert!((!"a".starts_with("abc"))); assert!((!"".starts_with("abc"))); assert!((!"ödd".starts_with("-"))); assert!(("ödd".starts_with("öd"))); } #[test] fn test_ends_with() { assert!(("".ends_with(""))); assert!(("abc".ends_with(""))); assert!(("abc".ends_with("c"))); assert!((!"a".ends_with("abc"))); assert!((!"".ends_with("abc"))); assert!((!"ddö".ends_with("-"))); assert!(("ddö".ends_with("dö"))); } #[test] fn test_is_empty() { assert!("".is_empty()); assert!(!"a".is_empty()); } #[test] fn test_replace() { let a = "a"; assert_eq!("".replace(a, "b"), ~""); assert_eq!("a".replace(a, "b"), ~"b"); 
assert_eq!("ab".replace(a, "b"), ~"bb"); let test = "test"; assert!(" test test ".replace(test, "toast") == ~" toast toast "); assert_eq!(" test test ".replace(test, ""), ~" "); } #[test] fn test_replace_2a() { let data = ~"ประเทศไทย中华"; let repl = ~"دولة الكويت"; let a = ~"ประเ"; let A = ~"دولة الكويتทศไทย中华"; assert_eq!(data.replace(a, repl), A); } #[test] fn test_replace_2b() { let data = ~"ประเทศไทย中华"; let repl = ~"دولة الكويت"; let b = ~"ะเ"; let B = ~"ปรدولة الكويتทศไทย中华"; assert_eq!(data.replace(b, repl), B); } #[test] fn test_replace_2c() { let data = ~"ประเทศไทย中华"; let repl = ~"دولة الكويت"; let c = ~"中华"; let C = ~"ประเทศไทยدولة الكويت"; assert_eq!(data.replace(c, repl), C); } #[test] fn test_replace_2d() { let data = ~"ประเทศไทย中华"; let repl = ~"دولة الكويت"; let d = ~"ไท华"; assert_eq!(data.replace(d, repl), data); } #[test] fn test_slice() { assert_eq!("ab", "abc".slice(0, 2)); assert_eq!("bc", "abc".slice(1, 3)); assert_eq!("", "abc".slice(1, 1)); assert_eq!("\u65e5", "\u65e5\u672c".slice(0, 3)); let data = "ประเทศไทย中华"; assert_eq!("ป", data.slice(0, 3)); assert_eq!("ร", data.slice(3, 6)); assert_eq!("", data.slice(3, 3)); assert_eq!("华", data.slice(30, 33)); fn a_million_letter_X() -> ~str { let mut i = 0; let mut rs = ~""; while i < 100000 { push_str(&mut rs, "华华华华华华华华华华"); i += 1; } rs } fn half_a_million_letter_X() -> ~str { let mut i = 0; let mut rs = ~""; while i < 100000 { push_str(&mut rs, "华华华华华"); i += 1; } rs } let letters = a_million_letter_X(); assert!(half_a_million_letter_X() == letters.slice(0u, 3u * 500000u).to_owned()); } #[test] fn test_slice_2() { let ss = "中华Việt Nam"; assert_eq!("华", ss.slice(3u, 6u)); assert_eq!("Việt Nam", ss.slice(6u, 16u)); assert_eq!("ab", "abc".slice(0u, 2u)); assert_eq!("bc", "abc".slice(1u, 3u)); assert_eq!("", "abc".slice(1u, 1u)); assert_eq!("中", ss.slice(0u, 3u)); assert_eq!("华V", ss.slice(3u, 7u)); assert_eq!("", ss.slice(3u, 3u)); /*0: 中 3: 华 6: V 7: i 8: ệ 11: t 12: 13: N 14: a 15: m */ } #[test] 
#[should_fail] fn test_slice_fail() { "中华Việt Nam".slice(0u, 2u); } #[test] fn test_slice_from() { assert_eq!("abcd".slice_from(0), "abcd"); assert_eq!("abcd".slice_from(2), "cd"); assert_eq!("abcd".slice_from(4), ""); } #[test] fn test_slice_to() { assert_eq!("abcd".slice_to(0), ""); assert_eq!("abcd".slice_to(2), "ab"); assert_eq!("abcd".slice_to(4), "abcd"); } #[test] fn test_trim_left_chars() { let v: &[char] = &[]; assert_eq!(" *** foo *** ".trim_left_chars(&v), " *** foo *** "); assert_eq!(" *** foo *** ".trim_left_chars(& &['*', ' ']), "foo *** "); assert_eq!(" *** *** ".trim_left_chars(& &['*', ' ']), ""); assert_eq!("foo *** ".trim_left_chars(& &['*', ' ']), "foo *** "); assert_eq!("11foo1bar11".trim_left_chars(&'1'), "foo1bar11"); assert_eq!("12foo1bar12".trim_left_chars(& &['1', '2']), "foo1bar12"); assert_eq!("123foo1bar123".trim_left_chars(&|c: char| c.is_digit()), "foo1bar123"); } #[test] fn test_trim_right_chars() { let v: &[char] = &[]; assert_eq!(" *** foo *** ".trim_right_chars(&v), " *** foo *** "); assert_eq!(" *** foo *** ".trim_right_chars(& &['*', ' ']), " *** foo"); assert_eq!(" *** *** ".trim_right_chars(& &['*', ' ']), ""); assert_eq!(" *** foo".trim_right_chars(& &['*', ' ']), " *** foo"); assert_eq!("11foo1bar11".trim_right_chars(&'1'), "11foo1bar"); assert_eq!("12foo1bar12".trim_right_chars(& &['1', '2']), "12foo1bar"); assert_eq!("123foo1bar123".trim_right_chars(&|c: char| c.is_digit()), "123foo1bar"); } #[test] fn test_trim_chars() { let v: &[char] = &[]; assert_eq!(" *** foo *** ".trim_chars(&v), " *** foo *** "); assert_eq!(" *** foo *** ".trim_chars(& &['*', ' ']), "foo"); assert_eq!(" *** *** ".trim_chars(& &['*', ' ']), ""); assert_eq!("foo".trim_chars(& &['*', ' ']), "foo"); assert_eq!("11foo1bar11".trim_chars(&'1'), "foo1bar"); assert_eq!("12foo1bar12".trim_chars(& &['1', '2']), "foo1bar"); assert_eq!("123foo1bar123".trim_chars(&|c: char| c.is_digit()), "foo1bar"); } #[test] fn test_trim_left() { assert_eq!("".trim_left(), ""); 
assert_eq!("a".trim_left(), "a"); assert_eq!(" ".trim_left(), ""); assert_eq!(" blah".trim_left(), "blah"); assert_eq!(" \u3000 wut".trim_left(), "wut"); assert_eq!("hey ".trim_left(), "hey "); } #[test] fn test_trim_right() { assert_eq!("".trim_right(), ""); assert_eq!("a".trim_right(), "a"); assert_eq!(" ".trim_right(), ""); assert_eq!("blah ".trim_right(), "blah"); assert_eq!("wut \u3000 ".trim_right(), "wut"); assert_eq!(" hey".trim_right(), " hey"); } #[test] fn test_trim() { assert_eq!("".trim(), ""); assert_eq!("a".trim(), "a"); assert_eq!(" ".trim(), ""); assert_eq!(" blah ".trim(), "blah"); assert_eq!("\nwut \u3000 ".trim(), "wut"); assert_eq!(" hey dude ".trim(), "hey dude"); } #[test] fn test_is_whitespace() { assert!("".is_whitespace()); assert!(" ".is_whitespace()); assert!("\u2009".is_whitespace()); // Thin space assert!(" \n\t ".is_whitespace()); assert!(!" _ ".is_whitespace()); } #[test] fn test_push_byte() { let mut s = ~"ABC"; unsafe{raw::push_byte(&mut s, 'D' as u8)}; assert_eq!(s, ~"ABCD"); } #[test] fn test_shift_byte() { let mut s = ~"ABC"; let b = unsafe{raw::shift_byte(&mut s)}; assert_eq!(s, ~"BC"); assert_eq!(b, 65u8); } #[test] fn test_pop_byte() { let mut s = ~"ABC"; let b = unsafe{raw::pop_byte(&mut s)}; assert_eq!(s, ~"AB"); assert_eq!(b, 67u8); } #[test] fn test_is_utf8() { // deny overlong encodings assert!(!is_utf8([0xc0, 0x80])); assert!(!is_utf8([0xc0, 0xae])); assert!(!is_utf8([0xe0, 0x80, 0x80])); assert!(!is_utf8([0xe0, 0x80, 0xaf])); assert!(!is_utf8([0xe0, 0x81, 0x81])); assert!(!is_utf8([0xf0, 0x82, 0x82, 0xac])); assert!(!is_utf8([0xf4, 0x90, 0x80, 0x80])); // deny surrogates assert!(!is_utf8([0xED, 0xA0, 0x80])); assert!(!is_utf8([0xED, 0xBF, 0xBF])); assert!(is_utf8([0xC2, 0x80])); assert!(is_utf8([0xDF, 0xBF])); assert!(is_utf8([0xE0, 0xA0, 0x80])); assert!(is_utf8([0xED, 0x9F, 0xBF])); assert!(is_utf8([0xEE, 0x80, 0x80])); assert!(is_utf8([0xEF, 0xBF, 0xBF])); assert!(is_utf8([0xF0, 0x90, 0x80, 0x80])); 
assert!(is_utf8([0xF4, 0x8F, 0xBF, 0xBF])); } #[test] fn test_raw_from_c_str() { unsafe { let a = ~[65, 65, 65, 65, 65, 65, 65, 0]; let b = a.as_ptr(); let c = raw::from_c_str(b); assert_eq!(c, ~"AAAAAAA"); } } #[test] fn test_as_bytes() { // no null let v = [ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97, 109 ]; assert_eq!("".as_bytes(), &[]); assert_eq!("abc".as_bytes(), &['a' as u8, 'b' as u8, 'c' as u8]); assert_eq!("ศไทย中华Việt Nam".as_bytes(), v); } #[test] #[should_fail] fn test_as_bytes_fail() { // Don't double free. (I'm not sure if this exercises the // original problem code path anymore.) let s = ~""; let _bytes = s.as_bytes(); fail!(); } #[test] fn test_as_ptr() { let buf = "hello".as_ptr(); unsafe { assert_eq!(*ptr::offset(buf, 0), 'h' as u8); assert_eq!(*ptr::offset(buf, 1), 'e' as u8); assert_eq!(*ptr::offset(buf, 2), 'l' as u8); assert_eq!(*ptr::offset(buf, 3), 'l' as u8); assert_eq!(*ptr::offset(buf, 4), 'o' as u8); } } #[test] fn test_subslice_offset() { let a = "kernelsprite"; let b = a.slice(7, a.len()); let c = a.slice(0, a.len() - 6); assert_eq!(a.subslice_offset(b), 7); assert_eq!(a.subslice_offset(c), 0); let string = "a\nb\nc"; let mut lines = ~[]; for line in string.lines() { lines.push(line) } assert_eq!(string.subslice_offset(lines[0]), 0); assert_eq!(string.subslice_offset(lines[1]), 2); assert_eq!(string.subslice_offset(lines[2]), 4); } #[test] #[should_fail] fn test_subslice_offset_2() { let a = "alchemiter"; let b = "cruxtruder"; a.subslice_offset(b); } #[test] fn vec_str_conversions() { let s1: ~str = ~"All mimsy were the borogoves"; let v: ~[u8] = s1.as_bytes().to_owned(); let s2: ~str = from_utf8(v).unwrap().to_owned(); let mut i: uint = 0u; let n1: uint = s1.len(); let n2: uint = v.len(); assert_eq!(n1, n2); while i < n1 { let a: u8 = s1[i]; let b: u8 = s2[i]; debug!("{}", a); debug!("{}", b); assert_eq!(a, b); i += 1u; } } #[test] fn 
test_contains() { assert!("abcde".contains("bcd")); assert!("abcde".contains("abcd")); assert!("abcde".contains("bcde")); assert!("abcde".contains("")); assert!("".contains("")); assert!(!"abcde".contains("def")); assert!(!"".contains("a")); let data = ~"ประเทศไทย中华Việt Nam"; assert!(data.contains("ประเ")); assert!(data.contains("ะเ")); assert!(data.contains("中华")); assert!(!data.contains("ไท华")); } #[test] fn test_contains_char() { assert!("abc".contains_char('b')); assert!("a".contains_char('a')); assert!(!"abc".contains_char('d')); assert!(!"".contains_char('a')); } #[test] fn test_utf16() { let pairs = [(~"𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n", ~[0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf46_u16, 0xd800_u16, 0xdf39_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf30_u16, 0x000a_u16]), (~"𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n", ~[0xd801_u16, 0xdc12_u16, 0xd801_u16, 0xdc49_u16, 0xd801_u16, 0xdc2e_u16, 0xd801_u16, 0xdc40_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4b_u16, 0x0020_u16, 0xd801_u16, 0xdc0f_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4d_u16, 0x000a_u16]), (~"𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n", ~[0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf11_u16, 0xd800_u16, 0xdf09_u16, 0x00b7_u16, 0xd800_u16, 0xdf0c_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf15_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]), (~"𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n", ~[0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc91_u16, 0xd801_u16, 0xdc9b_u16, 0xd801_u16, 0xdc92_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc93_u16, 0x0020_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc9a_u16, 0xd801_u16, 0xdc8d_u16, 0x0020_u16, 0xd801_u16, 0xdc8f_u16, 0xd801_u16, 0xdc9c_u16, 0xd801_u16, 0xdc92_u16, 0xd801_u16, 0xdc96_u16, 0xd801_u16, 0xdc86_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc86_u16, 0x000a_u16 ]) ]; for p in pairs.iter() { let (s, 
u) = (*p).clone(); assert!(s.to_utf16() == u); assert!(from_utf16(u) == s); assert!(from_utf16(s.to_utf16()) == s); assert!(from_utf16(u).to_utf16() == u); } } #[test] fn test_char_at() { let s = ~"ศไทย中华Việt Nam"; let v = ~['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; let mut pos = 0; for ch in v.iter() { assert!(s.char_at(pos) == *ch); pos += from_char(*ch).len(); } } #[test] fn test_char_at_reverse() { let s = ~"ศไทย中华Việt Nam"; let v = ~['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; let mut pos = s.len(); for ch in v.rev_iter() { assert!(s.char_at_reverse(pos) == *ch); pos -= from_char(*ch).len(); } } #[test] fn test_escape_unicode() { assert_eq!("abc".escape_unicode(), ~"\\x61\\x62\\x63"); assert_eq!("a c".escape_unicode(), ~"\\x61\\x20\\x63"); assert_eq!("\r\n\t".escape_unicode(), ~"\\x0d\\x0a\\x09"); assert_eq!("'\"\\".escape_unicode(), ~"\\x27\\x22\\x5c"); assert_eq!("\x00\x01\xfe\xff".escape_unicode(), ~"\\x00\\x01\\xfe\\xff"); assert_eq!("\u0100\uffff".escape_unicode(), ~"\\u0100\\uffff"); assert_eq!("\U00010000\U0010ffff".escape_unicode(), ~"\\U00010000\\U0010ffff"); assert_eq!("ab\ufb00".escape_unicode(), ~"\\x61\\x62\\ufb00"); assert_eq!("\U0001d4ea\r".escape_unicode(), ~"\\U0001d4ea\\x0d"); } #[test] fn test_escape_default() { assert_eq!("abc".escape_default(), ~"abc"); assert_eq!("a c".escape_default(), ~"a c"); assert_eq!("\r\n\t".escape_default(), ~"\\r\\n\\t"); assert_eq!("'\"\\".escape_default(), ~"\\'\\\"\\\\"); assert_eq!("\u0100\uffff".escape_default(), ~"\\u0100\\uffff"); assert_eq!("\U00010000\U0010ffff".escape_default(), ~"\\U00010000\\U0010ffff"); assert_eq!("ab\ufb00".escape_default(), ~"ab\\ufb00"); assert_eq!("\U0001d4ea\r".escape_default(), ~"\\U0001d4ea\\r"); } #[test] fn test_to_managed() { assert_eq!("abc".to_managed(), @"abc"); assert_eq!("abcdef".slice(1, 5).to_managed(), @"bcde"); } #[test] fn test_total_ord() { "1234".cmp(& &"123") == Greater; "123".cmp(& &"1234") == Less; "1234".cmp(& &"1234") == Equal; 
"12345555".cmp(& &"123456") == Less; "22".cmp(& &"1234") == Greater; } #[test] fn test_char_range_at() { let data = ~"b¢€𤭢𤭢€¢b"; assert_eq!('b', data.char_range_at(0).ch); assert_eq!('¢', data.char_range_at(1).ch); assert_eq!('€', data.char_range_at(3).ch); assert_eq!('𤭢', data.char_range_at(6).ch); assert_eq!('𤭢', data.char_range_at(10).ch); assert_eq!('€', data.char_range_at(14).ch); assert_eq!('¢', data.char_range_at(17).ch); assert_eq!('b', data.char_range_at(19).ch); } #[test] fn test_char_range_at_reverse_underflow() { assert_eq!("abc".char_range_at_reverse(0).next, 0); } #[test] fn test_add() { #[allow(unnecessary_allocation)]; macro_rules! t ( ($s1:expr, $s2:expr, $e:expr) => { { let s1 = $s1; let s2 = $s2; let e = $e; assert_eq!(s1 + s2, e.to_owned()); assert_eq!(s1.to_owned() + s2, e.to_owned()); assert_eq!(s1.to_managed() + s2, e.to_owned()); } } ); t!("foo", "bar", "foobar"); t!("foo", @"bar", "foobar"); t!("foo", ~"bar", "foobar"); t!("ศไทย中", "华Việt Nam", "ศไทย中华Việt Nam"); t!("ศไทย中", @"华Việt Nam", "ศไทย中华Việt Nam"); t!("ศไทย中", ~"华Việt Nam", "ศไทย中华Việt Nam"); } #[test] fn test_iterator() { use iter::*; let s = ~"ศไทย中华Việt Nam"; let v = ~['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; let mut pos = 0; let mut it = s.chars(); for c in it { assert_eq!(c, v[pos]); pos += 1; } assert_eq!(pos, v.len()); } #[test] fn test_rev_iterator() { use iter::*; let s = ~"ศไทย中华Việt Nam"; let v = ~['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ']; let mut pos = 0; let mut it = s.chars_rev(); for c in it { assert_eq!(c, v[pos]); pos += 1; } assert_eq!(pos, v.len()); } #[test] fn test_iterator_clone() { let s = "ศไทย中华Việt Nam"; let mut it = s.chars(); it.next(); assert!(it.zip(it.clone()).all(|(x,y)| x == y)); } #[test] fn test_bytesator() { let s = ~"ศไทย中华Việt Nam"; let v = [ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97, 109 ]; let mut pos = 0; for b in 
s.bytes() { assert_eq!(b, v[pos]); pos += 1; } } #[test] fn test_bytes_revator() { let s = ~"ศไทย中华Việt Nam"; let v = [ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97, 109 ]; let mut pos = v.len(); for b in s.bytes_rev() { pos -= 1; assert_eq!(b, v[pos]); } } #[test] fn test_char_indicesator() { use iter::*; let s = "ศไทย中华Việt Nam"; let p = [0, 3, 6, 9, 12, 15, 18, 19, 20, 23, 24, 25, 26, 27]; let v = ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; let mut pos = 0; let mut it = s.char_indices(); for c in it { assert_eq!(c, (p[pos], v[pos])); pos += 1; } assert_eq!(pos, v.len()); assert_eq!(pos, p.len()); } #[test] fn test_char_indices_revator() { use iter::*; let s = "ศไทย中华Việt Nam"; let p = [27, 26, 25, 24, 23, 20, 19, 18, 15, 12, 9, 6, 3, 0]; let v = ['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ']; let mut pos = 0; let mut it = s.char_indices_rev(); for c in it { assert_eq!(c, (p[pos], v[pos])); pos += 1; } assert_eq!(pos, v.len()); assert_eq!(pos, p.len()); } #[test] fn test_split_char_iterator() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; let split: ~[&str] = data.split(' ').collect(); assert_eq!( split, ~["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]); let mut rsplit: ~[&str] = data.rsplit(' ').collect(); rsplit.reverse(); assert_eq!(rsplit, ~["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]); let split: ~[&str] = data.split(|c: char| c == ' ').collect(); assert_eq!( split, ~["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]); let mut rsplit: ~[&str] = data.rsplit(|c: char| c == ' ').collect(); rsplit.reverse(); assert_eq!(rsplit, ~["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]); // Unicode let split: ~[&str] = data.split('ä').collect(); assert_eq!( split, ~["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]); let mut rsplit: ~[&str] = data.rsplit('ä').collect(); rsplit.reverse(); 
assert_eq!(rsplit, ~["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]); let split: ~[&str] = data.split(|c: char| c == 'ä').collect(); assert_eq!( split, ~["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]); let mut rsplit: ~[&str] = data.rsplit(|c: char| c == 'ä').collect(); rsplit.reverse(); assert_eq!(rsplit, ~["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]); } #[test] fn test_splitn_char_iterator() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; let split: ~[&str] = data.splitn(' ', 3).collect(); assert_eq!(split, ~["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]); let split: ~[&str] = data.splitn(|c: char| c == ' ', 3).collect(); assert_eq!(split, ~["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]); // Unicode let split: ~[&str] = data.splitn('ä', 3).collect(); assert_eq!(split, ~["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]); let split: ~[&str] = data.splitn(|c: char| c == 'ä', 3).collect(); assert_eq!(split, ~["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]); } #[test] fn test_rsplitn_char_iterator() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; let mut split: ~[&str] = data.rsplitn(' ', 3).collect(); split.reverse(); assert_eq!(split, ~["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]); let mut split: ~[&str] = data.rsplitn(|c: char| c == ' ', 3).collect(); split.reverse(); assert_eq!(split, ~["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]); // Unicode let mut split: ~[&str] = data.rsplitn('ä', 3).collect(); split.reverse(); assert_eq!(split, ~["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]); let mut split: ~[&str] = data.rsplitn(|c: char| c == 'ä', 3).collect(); split.reverse(); assert_eq!(split, ~["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]); } #[test] fn test_split_char_iterator_no_trailing() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; let split: ~[&str] = data.split('\n').collect(); assert_eq!(split, ~["", "Märy häd ä little lämb", "Little lämb", ""]); let 
split: ~[&str] = data.split_terminator('\n').collect(); assert_eq!(split, ~["", "Märy häd ä little lämb", "Little lämb"]); } #[test] fn test_rev_split_char_iterator_no_trailing() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; let mut split: ~[&str] = data.split('\n').rev().collect(); split.reverse(); assert_eq!(split, ~["", "Märy häd ä little lämb", "Little lämb", ""]); let mut split: ~[&str] = data.split_terminator('\n').rev().collect(); split.reverse(); assert_eq!(split, ~["", "Märy häd ä little lämb", "Little lämb"]); } #[test] fn test_words() { let data = "\n \tMäry häd\tä little lämb\nLittle lämb\n"; let words: ~[&str] = data.words().collect(); assert_eq!(words, ~["Märy", "häd", "ä", "little", "lämb", "Little", "lämb"]) } #[test] fn test_nfd_chars() { assert_eq!("abc".nfd_chars().collect::<~str>(), ~"abc"); assert_eq!("\u1e0b\u01c4".nfd_chars().collect::<~str>(), ~"d\u0307\u01c4"); assert_eq!("\u2026".nfd_chars().collect::<~str>(), ~"\u2026"); assert_eq!("\u2126".nfd_chars().collect::<~str>(), ~"\u03a9"); assert_eq!("\u1e0b\u0323".nfd_chars().collect::<~str>(), ~"d\u0323\u0307"); assert_eq!("\u1e0d\u0307".nfd_chars().collect::<~str>(), ~"d\u0323\u0307"); assert_eq!("a\u0301".nfd_chars().collect::<~str>(), ~"a\u0301"); assert_eq!("\u0301a".nfd_chars().collect::<~str>(), ~"\u0301a"); assert_eq!("\ud4db".nfd_chars().collect::<~str>(), ~"\u1111\u1171\u11b6"); assert_eq!("\uac1c".nfd_chars().collect::<~str>(), ~"\u1100\u1162"); } #[test] fn test_nfkd_chars() { assert_eq!("abc".nfkd_chars().collect::<~str>(), ~"abc"); assert_eq!("\u1e0b\u01c4".nfkd_chars().collect::<~str>(), ~"d\u0307DZ\u030c"); assert_eq!("\u2026".nfkd_chars().collect::<~str>(), ~"..."); assert_eq!("\u2126".nfkd_chars().collect::<~str>(), ~"\u03a9"); assert_eq!("\u1e0b\u0323".nfkd_chars().collect::<~str>(), ~"d\u0323\u0307"); assert_eq!("\u1e0d\u0307".nfkd_chars().collect::<~str>(), ~"d\u0323\u0307"); assert_eq!("a\u0301".nfkd_chars().collect::<~str>(), ~"a\u0301"); 
assert_eq!("\u0301a".nfkd_chars().collect::<~str>(), ~"\u0301a"); assert_eq!("\ud4db".nfkd_chars().collect::<~str>(), ~"\u1111\u1171\u11b6"); assert_eq!("\uac1c".nfkd_chars().collect::<~str>(), ~"\u1100\u1162"); } #[test] fn test_lines() { let data = "\nMäry häd ä little lämb\n\nLittle lämb\n"; let lines: ~[&str] = data.lines().collect(); assert_eq!(lines, ~["", "Märy häd ä little lämb", "", "Little lämb"]); let data = "\nMäry häd ä little lämb\n\nLittle lämb"; // no trailing \n let lines: ~[&str] = data.lines().collect(); assert_eq!(lines, ~["", "Märy häd ä little lämb", "", "Little lämb"]); } #[test] fn test_split_strator() { fn t<'a>(s: &str, sep: &'a str, u: ~[&str]) { let v: ~[&str] = s.split_str(sep).collect(); assert_eq!(v, u); } t("--1233345--", "12345", ~["--1233345--"]); t("abc::hello::there", "::", ~["abc", "hello", "there"]); t("::hello::there", "::", ~["", "hello", "there"]); t("hello::there::", "::", ~["hello", "there", ""]); t("::hello::there::", "::", ~["", "hello", "there", ""]); t("ประเทศไทย中华Việt Nam", "中华", ~["ประเทศไทย", "Việt Nam"]); t("zzXXXzzYYYzz", "zz", ~["", "XXX", "YYY", ""]); t("zzXXXzYYYz", "XXX", ~["zz", "zYYYz"]); t(".XXX.YYY.", ".", ~["", "XXX", "YYY", ""]); t("", ".", ~[""]); t("zz", "zz", ~["",""]); t("ok", "z", ~["ok"]); t("zzz", "zz", ~["","z"]); t("zzzzz", "zz", ~["","","z"]); } #[test] fn test_str_default() { use default::Default; fn t<S: Default + Str>() { let s: S = Default::default(); assert_eq!(s.as_slice(), ""); } t::<&str>(); t::<@str>(); t::<~str>(); } #[test] fn test_str_container() { fn sum_len<S: Container>(v: &[S]) -> uint { v.iter().map(|x| x.len()).sum() } let s = ~"01234"; assert_eq!(5, sum_len(["012", "", "34"])); assert_eq!(5, sum_len([@"01", @"2", @"34", @""])); assert_eq!(5, sum_len([~"01", ~"2", ~"34", ~""])); assert_eq!(5, sum_len([s.as_slice()])); } #[test] fn test_str_truncate() { let mut s = ~"12345"; s.truncate(5); assert_eq!(s.as_slice(), "12345"); s.truncate(3); assert_eq!(s.as_slice(), "123"); 
s.truncate(0); assert_eq!(s.as_slice(), ""); let mut s = ~"12345"; let p = s.as_ptr(); s.truncate(3); s.push_str("6"); let p_ = s.as_ptr(); assert_eq!(p_, p); } #[test] #[should_fail] fn test_str_truncate_invalid_len() { let mut s = ~"12345"; s.truncate(6); } #[test] #[should_fail] fn test_str_truncate_split_codepoint() { let mut s = ~"\u00FC"; // ü s.truncate(1); } #[test] fn test_str_from_utf8() { let xs = bytes!("hello"); assert_eq!(from_utf8(xs), Some("hello")); let xs = bytes!("ศไทย中华Việt Nam"); assert_eq!(from_utf8(xs), Some("ศไทย中华Việt Nam")); let xs = bytes!("hello", 0xff); assert_eq!(from_utf8(xs), None); } #[test] fn test_str_from_utf8_owned() { let xs = bytes!("hello").to_owned(); assert_eq!(from_utf8_owned(xs), Some(~"hello")); let xs = bytes!("ศไทย中华Việt Nam").to_owned(); assert_eq!(from_utf8_owned(xs), Some(~"ศไทย中华Việt Nam")); let xs = bytes!("hello", 0xff).to_owned(); assert_eq!(from_utf8_owned(xs), None); } #[test] fn test_to_send_str() { assert_eq!("abcde".to_send_str(), SendStrStatic("abcde")); assert_eq!("abcde".to_send_str(), SendStrOwned(~"abcde")); } #[test] fn test_from_str() { let owned: Option<~str> = from_str(&"string"); assert_eq!(owned, Some(~"string")); let managed: Option<@str> = from_str(&"string"); assert_eq!(managed, Some(@"string")); } } #[cfg(test)] mod bench { use extra::test::BenchHarness; use super::*; use prelude::*; #[bench] fn char_iterator(bh: &mut BenchHarness) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; let len = s.char_len(); bh.iter(|| assert_eq!(s.chars().len(), len)); } #[bench] fn char_iterator_ascii(bh: &mut BenchHarness) { let s = "Mary had a little lamb, Little lamb Mary had a little lamb, Little lamb Mary had a little lamb, Little lamb Mary had a little lamb, Little lamb Mary had a little lamb, Little lamb Mary had a little lamb, Little lamb"; let len = s.char_len(); bh.iter(|| assert_eq!(s.chars().len(), len)); } #[bench] fn char_iterator_rev(bh: &mut BenchHarness) { let s = "ศไทย中华Việt 
Nam; Mary had a little lamb, Little lamb"; let len = s.char_len(); bh.iter(|| assert_eq!(s.chars_rev().len(), len)); } #[bench] fn char_indicesator(bh: &mut BenchHarness) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; let len = s.char_len(); bh.iter(|| assert_eq!(s.char_indices().len(), len)); } #[bench] fn char_indicesator_rev(bh: &mut BenchHarness) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; let len = s.char_len(); bh.iter(|| assert_eq!(s.char_indices_rev().len(), len)); } #[bench] fn split_unicode_ascii(bh: &mut BenchHarness) { let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam"; bh.iter(|| assert_eq!(s.split('V').len(), 3)); } #[bench] fn split_unicode_not_ascii(bh: &mut BenchHarness) { struct NotAscii(char); impl CharEq for NotAscii { fn matches(&self, c: char) -> bool { let NotAscii(cc) = *self; cc == c } fn only_ascii(&self) -> bool { false } } let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam"; bh.iter(|| assert_eq!(s.split(NotAscii('V')).len(), 3)); } #[bench] fn split_ascii(bh: &mut BenchHarness) { let s = "Mary had a little lamb, Little lamb, little-lamb."; let len = s.split(' ').len(); bh.iter(|| assert_eq!(s.split(' ').len(), len)); } #[bench] fn split_not_ascii(bh: &mut BenchHarness) { struct NotAscii(char); impl CharEq for NotAscii { #[inline] fn matches(&self, c: char) -> bool { let NotAscii(cc) = *self; cc == c } fn only_ascii(&self) -> bool { false } } let s = "Mary had a little lamb, Little lamb, little-lamb."; let len = s.split(' ').len(); bh.iter(|| assert_eq!(s.split(NotAscii(' ')).len(), len)); } #[bench] fn split_extern_fn(bh: &mut BenchHarness) { let s = "Mary had a little lamb, Little lamb, little-lamb."; let len = s.split(' ').len(); fn pred(c: char) -> bool { c == ' ' } bh.iter(|| assert_eq!(s.split(pred).len(), len)); } #[bench] fn split_closure(bh: &mut BenchHarness) { let s = "Mary had a little lamb, Little lamb, little-lamb."; let len = s.split(' ').len(); bh.iter(|| assert_eq!(s.split(|c: char| c == 
' ').len(), len)); } #[bench] fn split_slice(bh: &mut BenchHarness) { let s = "Mary had a little lamb, Little lamb, little-lamb."; let len = s.split(' ').len(); bh.iter(|| assert_eq!(s.split(&[' ']).len(), len)); } #[bench] fn is_utf8_100_ascii(bh: &mut BenchHarness) { let s = bytes!("Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "); assert_eq!(100, s.len()); bh.iter(|| { let _ = is_utf8(s); }); } #[bench] fn is_utf8_100_multibyte(bh: &mut BenchHarness) { let s = bytes!("𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰"); assert_eq!(100, s.len()); bh.iter(|| { let _ = is_utf8(s); }); } #[bench] fn bench_with_capacity(bh: &mut BenchHarness) { bh.iter(|| { let _ = with_capacity(100); }); } #[bench] fn bench_push_str(bh: &mut BenchHarness) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; bh.iter(|| { let mut r = ~""; r.push_str(s); }); } #[bench] fn bench_connect(bh: &mut BenchHarness) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; let sep = "→"; let v = [s, s, s, s, s, s, s, s, s, s]; bh.iter(|| { assert_eq!(v.connect(sep).len(), s.len() * 10 + sep.len() * 9); }) } }
30.205946
92
0.519294
2fe4048391798f99a113fdd4863287a78310ced5
838
//! Plox //! This library contains all Bézier curve- and font-related functionality. //! Essentially, everything you need to turn strings into Bézier curve buffers, //! and everything you need to manipulate said curves. //! //! There is no OpenGL stuff here, because i want to have the possibility to //! switch to Vulkan if i ever seriously intend to maintain this for real. extern crate nalgebra_glm as glm; extern crate ttf_parser as ttf; pub mod spline; pub mod svg; pub mod shaping; pub mod font; pub mod atlas; pub mod polynomial; pub mod line; pub mod tesselate; pub mod gpu; pub use polynomial::Poly; pub use spline::{Point, Quadratic, Spline}; /// Check if two numbers a,b are approximately equal. /// "Apprixmately" has a _very_ liberal definition in this case. fn approx(a: f32, b: f32) -> bool { (a - b).abs() < 1e-4 }
28.896552
79
0.73031
2f8e940258b5290243f909b2c42eda240c6403f6
5,440
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use hhbc_by_ref_adata_state::AdataState;
use hhbc_by_ref_env::emitter::Emitter;
use hhbc_by_ref_hhas_adata::{
    HhasAdata, DARRAY_PREFIX, DICT_PREFIX, KEYSET_PREFIX, VARRAY_PREFIX, VEC_PREFIX,
};
use hhbc_by_ref_hhbc_ast::*;
use hhbc_by_ref_hhbc_string_utils as string_utils;
use hhbc_by_ref_instruction_sequence::{Error, InstrSeq};
use hhbc_by_ref_options::HhvmFlags;
use hhbc_by_ref_runtime::TypedValue;

/// Walk an instruction sequence and rewrite every `TypedValue` literal
/// instruction into a concrete `ILitConst` form (see `rewrite_typed_value`).
pub fn rewrite_typed_values<'arena>(
    alloc: &'arena bumpalo::Bump,
    emitter: &mut Emitter<'arena>,
    instrseq: &mut InstrSeq<'arena>,
) -> std::result::Result<(), hhbc_by_ref_instruction_sequence::Error> {
    instrseq.map_result_mut(&mut |instr| rewrite_typed_value(alloc, emitter, instr))
}

/// Replace a single `ILitConst(TypedValue(..))` instruction, in place, with the
/// matching primitive `ILitConst` variant. Scalars map directly
/// (Null/True/False/Int/String/Double); container values
/// (Keyset/Vec/Dict/HhasAdata) are interned via `get_array_identifier` and
/// referenced by identifier. Instructions that are not typed-value literals are
/// left untouched.
///
/// Errors on `TypedValue::Uninit` (not representable as a literal) and on empty
/// or unrecognized `HhasAdata` payloads.
fn rewrite_typed_value<'arena>(
    alloc: &'arena bumpalo::Bump,
    e: &mut Emitter<'arena>,
    instr: &mut Instruct<'arena>,
) -> std::result::Result<(), hhbc_by_ref_instruction_sequence::Error> {
    //use InstructLitConst::*;
    if let Instruct::ILitConst(InstructLitConst::TypedValue(tv)) = instr {
        *instr = Instruct::ILitConst(match &tv {
            TypedValue::Uninit => {
                return Err(Error::Unrecoverable("rewrite_typed_value: uninit".into()));
            }
            TypedValue::Null => InstructLitConst::Null,
            TypedValue::Bool(true) => InstructLitConst::True,
            TypedValue::Bool(false) => InstructLitConst::False,
            TypedValue::Int(i) => InstructLitConst::Int(*i),
            TypedValue::String(s) => InstructLitConst::String(s),
            TypedValue::LazyClass(s) => {
                let classid: hhbc_by_ref_hhbc_ast::ClassId<'arena> =
                    hhbc_by_ref_hhbc_id::class::Type::from_ast_name_and_mangle(alloc, *s);
                InstructLitConst::LazyClass(classid)
            }
            TypedValue::Float(f) => {
                // Format the float through the shared helper so its textual
                // representation matches the rest of the emitter, then copy the
                // string into the arena.
                let fstr = bumpalo::collections::String::from_str_in(
                    string_utils::float::to_string(*f).as_str(),
                    alloc,
                )
                .into_bump_str();
                InstructLitConst::Double(fstr)
            }
            TypedValue::Keyset(_) => {
                let arrayid = get_array_identifier(alloc, e, tv);
                InstructLitConst::Keyset(arrayid)
            }
            TypedValue::Vec(_) => {
                let arrayid = get_array_identifier(alloc, e, tv);
                InstructLitConst::Vec(arrayid)
            }
            TypedValue::Dict(_) => {
                let arrayid = get_array_identifier(alloc, e, tv);
                InstructLitConst::Dict(arrayid)
            }
            TypedValue::HhasAdata(d) if d.is_empty() => {
                return Err(Error::Unrecoverable("HhasAdata may not be empty".into()));
            }
            TypedValue::HhasAdata(d) => {
                let arrayid = get_array_identifier(alloc, e, tv);
                // The first character of the serialized payload encodes the
                // container kind (varray/vec, darray/dict, keyset).
                match &d[..1] {
                    VARRAY_PREFIX | VEC_PREFIX => InstructLitConst::Vec(arrayid),
                    DARRAY_PREFIX | DICT_PREFIX => InstructLitConst::Dict(arrayid),
                    KEYSET_PREFIX => InstructLitConst::Keyset(arrayid),
                    _ => {
                        return Err(Error::Unrecoverable(format!(
                            "Unknown HhasAdata data: {}",
                            d
                        )));
                    }
                }
            }
        })
    };
    Ok(())
}

/// Return the adata identifier for `tv`, minting a fresh one on first use.
/// When the ARRAY_PROVENANCE flag is set, every occurrence gets a new id (no
/// deduplication); otherwise structurally equal values share one id through the
/// emitter state's `array_identifier_map`.
pub fn get_array_identifier<'arena>(
    alloc: &'arena bumpalo::Bump,
    e: &mut Emitter<'arena>,
    tv: &TypedValue<'arena>,
) -> &'arena str {
    if e.options().hhvm.flags.contains(HhvmFlags::ARRAY_PROVENANCE) {
        next_adata_id(alloc, e, tv)
    } else {
        match e.emit_adata_state_mut(alloc).array_identifier_map.get(tv) {
            None => {
                let id = next_adata_id(alloc, e, tv);
                e.emit_adata_state_mut(alloc)
                    .array_identifier_map
                    .insert(tv.clone(), id);
                id
            }
            Some(id) => id,
        }
    }
}

/// Mint the next adata id ("A_{counter}"), bump the counter, and record the
/// value in the emitter's adata table under that id. The returned id string is
/// arena-allocated so it lives as long as `'arena`.
fn next_adata_id<'arena>(
    alloc: &'arena bumpalo::Bump,
    e: &mut Emitter<'arena>,
    value: &TypedValue<'arena>,
) -> &'arena str {
    let mut state = e.emit_adata_state_mut(alloc);
    let id = format!("A_{}", state.array_identifier_counter);
    state.array_identifier_counter += 1;
    state.adata.push(HhasAdata {
        id: id.clone(),
        value: value.clone(),
    });
    bumpalo::collections::String::from_str_in(id.as_str(), alloc).into_bump_str()
}

/// Move the accumulated adata state out of the emitter, leaving a default
/// (empty) state behind.
pub fn take<'arena>(alloc: &'arena bumpalo::Bump, e: &mut Emitter<'arena>) -> AdataState<'arena> {
    let state = e.emit_adata_state_mut(alloc);
    std::mem::take(state)
}

#[cfg(test)]
mod tests {
    use super::*;

    // verify it compiles (no test attribute)
    #[allow(dead_code)]
    #[allow(clippy::needless_lifetimes)]
    fn ref_state_from_emiter<'arena>(e: &Emitter<'arena>) {
        let _: &AdataState = e.emit_adata_state();
    }

    // verify it compiles (no test attribute)
    #[allow(dead_code)]
    #[allow(clippy::needless_lifetimes)]
    fn mut_state_from_emiter<'arena>(alloc: &'arena bumpalo::Bump, e: &mut Emitter<'arena>) {
        let _: &mut AdataState = e.emit_adata_state_mut(alloc);
    }
}
37.006803
98
0.580699
22622f7c1bf6f0a90f58b9daf7415b155056403c
1,188
//! The `cat` sub-command. //! //! Read a sequence of Cranelift IR files and print them again to stdout. This has the effect of //! normalizing formatting and removing comments. use crate::utils::read_to_string; use anyhow::{Context, Result}; use clap::Parser; use cranelift_reader::parse_functions; use std::path::{Path, PathBuf}; /// Outputs .clif file #[derive(Parser)] pub struct Options { /// Specify input file(s) to be used. Use '-' for stdin. #[clap(required = true)] files: Vec<PathBuf>, /// Enable debug output on stderr/stdout #[clap(short)] debug: bool, } pub fn run(options: &Options) -> Result<()> { crate::handle_debug_flag(options.debug); for (i, f) in options.files.iter().enumerate() { if i != 0 { println!(); } cat_one(f)? } Ok(()) } fn cat_one(path: &Path) -> Result<()> { let buffer = read_to_string(path)?; let items = parse_functions(&buffer).with_context(|| format!("failed to parse {}", path.display()))?; for (idx, func) in items.into_iter().enumerate() { if idx != 0 { println!(); } print!("{}", func); } Ok(()) }
24.244898
97
0.590067