// ---------------------------------------------------------------------------
// File 914648688a5dc97f23312b092860df21d3a7d1d7 (1,975 bytes)
// ---------------------------------------------------------------------------
use crate::{
gc::{Finalize, Trace},
syntax::ast::node::{join_nodes, Node},
};
use boa_interner::{Interner, ToInternedString};
#[cfg(feature = "deser")]
use serde::{Deserialize, Serialize};
#[cfg(test)]
mod tests;
/// Calling a function performs the specified actions with the indicated parameters.
///
/// Defining a function does not execute it. Defining it simply names the function and
/// specifies what to do when the function is called. Functions must be in scope when they are
/// called, but the function declaration can be hoisted. The scope of a function is the
/// function in which it is declared (or the entire program, if it is declared at the top
/// level).
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#prod-CallExpression
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Functions#Calling_functions
#[cfg_attr(feature = "deser", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, Trace, Finalize, PartialEq)]
pub struct Call {
expr: Box<Node>,
args: Box<[Node]>,
}
impl Call {
/// Creates a new `Call` AST node.
pub fn new<E, A>(expr: E, args: A) -> Self
where
E: Into<Node>,
A: Into<Box<[Node]>>,
{
Self {
expr: Box::new(expr.into()),
args: args.into(),
}
}
    /// Gets the expression being called (the callee).
pub fn expr(&self) -> &Node {
&self.expr
}
/// Retrieves the arguments passed to the function.
pub fn args(&self) -> &[Node] {
&self.args
}
}
impl ToInternedString for Call {
fn to_interned_string(&self, interner: &Interner) -> String {
format!(
"{}({})",
self.expr.to_interned_string(interner),
join_nodes(interner, &self.args)
)
}
}
impl From<Call> for Node {
fn from(call: Call) -> Self {
Self::Call(call)
}
}
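// Editorial usage sketch (not part of the original module); it relies only on
// the APIs defined above: `Call::new`, `Call::args`, and `From<Call> for Node`.
// The `callee` and `arg` parameters stand in for any values already
// convertible into `Node`.
#[allow(dead_code)]
fn call_node_sketch(callee: Node, arg: Node) {
    // Build `callee(arg)`, inspect its arguments, and erase it to a `Node`.
    let call = Call::new(callee, vec![arg]);
    assert_eq!(call.args().len(), 1);
    let _node: Node = call.into();
}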
// ---------------------------------------------------------------------------
// File fef2fd9b00ee5b4b27509b5185e4d07838009156 (5,712 bytes)
// ---------------------------------------------------------------------------
//! Convert to/from external::Tx.
use crate::{convert::ConversionError, external};
use mc_transaction_core::{ring_signature::SignatureRctBulletproofs, tx};
use std::convert::TryFrom;
/// Convert mc_transaction_core::tx::Tx --> external::Tx.
impl From<&tx::Tx> for external::Tx {
fn from(source: &tx::Tx) -> Self {
let mut tx = external::Tx::new();
tx.set_prefix(external::TxPrefix::from(&source.prefix));
tx.set_signature(external::SignatureRctBulletproofs::from(&source.signature));
tx
}
}
/// Convert external::Tx --> mc_transaction_core::tx::Tx.
impl TryFrom<&external::Tx> for tx::Tx {
type Error = ConversionError;
fn try_from(source: &external::Tx) -> Result<Self, Self::Error> {
let prefix = tx::TxPrefix::try_from(source.get_prefix())?;
let signature = SignatureRctBulletproofs::try_from(source.get_signature())?;
Ok(tx::Tx { prefix, signature })
}
}
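// Editorial round-trip sketch (not in the original file), using only the two
// impls above: converting to the protobuf type and back should be lossless,
// which the test module below checks exhaustively.
#[allow(dead_code)]
fn round_trip_sketch(tx: &tx::Tx) -> Result<tx::Tx, ConversionError> {
    let proto = external::Tx::from(tx);
    tx::Tx::try_from(&proto)
}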
#[cfg(test)]
mod tests {
use super::*;
use mc_account_keys::{AccountKey, PublicAddress};
use mc_crypto_keys::RistrettoPublic;
use mc_transaction_core::{
onetime_keys::recover_onetime_private_key,
tokens::Mob,
tx::{Tx, TxOut, TxOutMembershipProof},
BlockVersion, Token,
};
use mc_transaction_core_test_utils::MockFogResolver;
use mc_transaction_std::{EmptyMemoBuilder, InputCredentials, TransactionBuilder};
use protobuf::Message;
use rand::{rngs::StdRng, SeedableRng};
#[test]
    /// Tx --> external::Tx --> Tx should be the identity function.
fn test_convert_tx() {
// Generate a Tx to test with. This is copied from
// transaction_builder.rs::test_simple_transaction
let mut rng: StdRng = SeedableRng::from_seed([1u8; 32]);
for block_version in BlockVersion::iterator() {
let alice = AccountKey::random(&mut rng);
let bob = AccountKey::random(&mut rng);
let charlie = AccountKey::random(&mut rng);
let minted_outputs: Vec<TxOut> = {
// Mint an initial collection of outputs, including one belonging to
// `sender_account`.
let mut recipient_and_amounts: Vec<(PublicAddress, u64)> = Vec::new();
recipient_and_amounts.push((alice.default_subaddress(), 65536));
// Some outputs belonging to this account will be used as mix-ins.
recipient_and_amounts.push((charlie.default_subaddress(), 65536));
recipient_and_amounts.push((charlie.default_subaddress(), 65536));
mc_transaction_core_test_utils::get_outputs(
block_version,
&recipient_and_amounts,
&mut rng,
)
};
let mut transaction_builder = TransactionBuilder::new(
block_version,
Mob::ID,
MockFogResolver::default(),
EmptyMemoBuilder::default(),
);
let ring: Vec<TxOut> = minted_outputs.clone();
let public_key = RistrettoPublic::try_from(&minted_outputs[0].public_key).unwrap();
let onetime_private_key = recover_onetime_private_key(
&public_key,
alice.view_private_key(),
&alice.default_subaddress_spend_private(),
);
let membership_proofs: Vec<TxOutMembershipProof> = ring
.iter()
.map(|_tx_out| {
// TransactionBuilder does not validate membership proofs, but does require one
// for each ring member.
TxOutMembershipProof::new(0, 0, Default::default())
})
.collect();
let input_credentials = InputCredentials::new(
ring.clone(),
membership_proofs,
0,
onetime_private_key,
*alice.view_private_key(),
)
.unwrap();
transaction_builder.add_input(input_credentials);
transaction_builder.set_fee(0).unwrap();
transaction_builder
.add_output(65536, &bob.default_subaddress(), &mut rng)
.unwrap();
let tx = transaction_builder.build(&mut rng).unwrap();
// decode(encode(tx)) should be the identity function.
{
let bytes = mc_util_serial::encode(&tx);
let recovered_tx = mc_util_serial::decode(&bytes).unwrap();
assert_eq!(tx, recovered_tx);
}
// Converting mc_transaction_core::Tx -> external::Tx -> mc_transaction_core::Tx
// should be the identity function.
{
let external_tx: external::Tx = external::Tx::from(&tx);
let recovered_tx: Tx = Tx::try_from(&external_tx).unwrap();
assert_eq!(tx, recovered_tx);
}
// Encoding with prost, decoding with protobuf should be the identity function.
{
let bytes = mc_util_serial::encode(&tx);
let recovered_tx = external::Tx::parse_from_bytes(&bytes).unwrap();
assert_eq!(recovered_tx, external::Tx::from(&tx));
}
// Encoding with protobuf, decoding with prost should be the identity function.
{
let external_tx: external::Tx = external::Tx::from(&tx);
let bytes = external_tx.write_to_bytes().unwrap();
let recovered_tx: Tx = mc_util_serial::decode(&bytes).unwrap();
assert_eq!(tx, recovered_tx);
}
}
}
}
// ---------------------------------------------------------------------------
// File ef99df341db96e0ad13f0d64414c2704633270ba (82,038 bytes)
// ---------------------------------------------------------------------------
use prolog_parser::clause_name;
use crate::codegen::*;
use crate::debray_allocator::*;
use crate::indexing::{merge_clause_index, remove_index, IndexingCodePtr};
use crate::machine::load_state::*;
use crate::machine::loader::*;
use crate::machine::preprocessor::*;
use crate::machine::term_stream::*;
use crate::machine::*;
use slice_deque::{sdeq, SliceDeque};
use std::cell::Cell;
use std::collections::VecDeque;
use std::ops::Range;
struct StandaloneCompileResult {
clause_code: Code,
standalone_skeleton: PredicateSkeleton,
}
pub(super) fn bootstrapping_compile(
stream: Stream,
wam: &mut Machine,
listing_src: ListingSource,
) -> Result<(), SessionError> {
let stream = &mut parsing_stream(stream)?;
let term_stream = BootstrappingTermStream::from_prolog_stream(
stream,
wam.machine_st.atom_tbl.clone(),
wam.machine_st.flags,
listing_src,
);
let loader = Loader::new(term_stream, wam);
loader.load()?;
Ok(())
}
// Throws an error if a query is found; only predicates, facts, and rules can
// be compiled as relations.
pub(super) fn compile_relation(
cg: &mut CodeGenerator<DebrayAllocator>,
tl: &TopLevel,
) -> Result<Code, CompilationError> {
match tl {
&TopLevel::Query(_) => Err(CompilationError::ExpectedRel),
&TopLevel::Predicate(ref clauses) => cg.compile_predicate(&clauses),
&TopLevel::Fact(ref fact, ..) => Ok(cg.compile_fact(fact)),
&TopLevel::Rule(ref rule, ..) => cg.compile_rule(rule),
}
}
pub(super) fn compile_appendix(
code: &mut Code,
mut queue: VecDeque<TopLevel>,
jmp_by_locs: Vec<usize>,
non_counted_bt: bool,
atom_tbl: TabledData<Atom>,
) -> Result<(), CompilationError> {
let mut jmp_by_locs = VecDeque::from(jmp_by_locs);
while let Some(jmp_by_offset) = jmp_by_locs.pop_front() {
let code_len = code.len();
match &mut code[jmp_by_offset] {
&mut Line::Control(ControlInstruction::JmpBy(_, ref mut offset, ..)) => {
*offset = code_len - jmp_by_offset;
}
_ => {
unreachable!()
}
}
// false because the inner predicate is a one-off, hence not extensible.
let settings = CodeGenSettings {
global_clock_tick: None,
is_extensible: false,
non_counted_bt,
};
let mut cg = CodeGenerator::<DebrayAllocator>::new(atom_tbl.clone(), settings);
let tl = queue.pop_front().unwrap();
let decl_code = compile_relation(&mut cg, &tl)?;
jmp_by_locs.extend(cg.jmp_by_locs.into_iter().map(|offset| offset + code.len()));
code.extend(decl_code.into_iter());
}
Ok(())
}
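/// Worked example (editorial, not from the original source): with clause
/// argument-index keys `[arg 1, arg 1, none, arg 1, arg 1]`, all indexed
/// clauses sharing one switch_on_term location, and `target_pos == 4`, the
/// backward scan from clause 3 stops at the unindexed clause 2, so the lower
/// bound of the indexed subsequence containing the target is 3.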
fn lower_bound_of_target_clause(skeleton: &PredicateSkeleton, target_pos: usize) -> usize {
if target_pos == 0 {
return 0;
}
let arg_num = skeleton.clauses[target_pos - 1].opt_arg_index_key.arg_num();
if arg_num == 0 {
return target_pos - 1;
}
let mut index_loc_opt = None;
for index in (0..target_pos).rev() {
let current_arg_num = skeleton.clauses[index].opt_arg_index_key.arg_num();
if current_arg_num == 0 || current_arg_num != arg_num {
return index + 1;
}
if let Some(index_loc) = index_loc_opt {
let current_index_loc = skeleton.clauses[index]
.opt_arg_index_key
.switch_on_term_loc();
if Some(index_loc) != current_index_loc {
return index + 1;
}
} else {
index_loc_opt = skeleton.clauses[index]
.opt_arg_index_key
.switch_on_term_loc();
}
}
0
}
fn derelictize_try_me_else(
code: &mut Code,
index: usize,
retraction_info: &mut RetractionInfo,
) -> Option<usize> {
match &mut code[index] {
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(0))) => None,
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(ref mut o))) => {
retraction_info.push_record(RetractionRecord::ReplacedDynamicElseOffset(index, *o));
Some(mem::replace(o, 0))
}
Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Next(0))) => None,
Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Next(ref mut o))) => {
retraction_info.push_record(RetractionRecord::ReplacedDynamicElseOffset(index, *o));
Some(mem::replace(o, 0))
}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Fail(_)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Fail(_))) => None,
Line::Choice(ChoiceInstruction::TryMeElse(0)) => None,
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) => {
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(index, *o));
Some(mem::replace(o, 0))
}
_ => {
unreachable!()
}
}
}
fn merge_indices(
code: &mut Code,
target_index_loc: usize,
index_range: Range<usize>,
skeleton: &mut [ClauseIndexInfo],
retraction_info: &mut RetractionInfo,
) {
for clause_index in index_range {
if let Some(index_loc) = skeleton[clause_index]
.opt_arg_index_key
.switch_on_term_loc()
{
let clause_loc =
find_inner_choice_instr(code, skeleton[clause_index].clause_start, index_loc);
let target_indexing_line = to_indexing_line_mut(&mut code[target_index_loc]).unwrap();
skeleton[clause_index]
.opt_arg_index_key
.set_switch_on_term_loc(target_index_loc);
merge_clause_index(
target_indexing_line,
&mut skeleton[0..clause_index + 1],
clause_loc,
AppendOrPrepend::Append,
);
retraction_info.push_record(RetractionRecord::AddedIndex(
skeleton[clause_index].opt_arg_index_key.clone(),
clause_loc,
));
} else {
break;
}
}
}
fn find_outer_choice_instr(code: &Code, mut index: usize) -> usize {
loop {
match &code[index] {
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(i)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Next(i)))
if *i > 0 =>
{
index += i;
}
_ => {
return index;
}
}
}
}
fn find_inner_choice_instr(code: &Code, mut index: usize, index_loc: usize) -> usize {
loop {
match &code[index] {
Line::Choice(ChoiceInstruction::TryMeElse(o))
| Line::Choice(ChoiceInstruction::RetryMeElse(o)) => {
if *o > 0 {
return index;
} else {
index = index_loc;
}
}
&Line::Choice(ChoiceInstruction::DynamicElse(_, _, next_or_fail)) => match next_or_fail
{
NextOrFail::Next(i) => {
if i == 0 {
index = index_loc;
} else {
return index;
}
}
NextOrFail::Fail(_) => {
index = index_loc;
}
},
&Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, next_or_fail)) => {
match next_or_fail {
NextOrFail::Next(i) => {
if i == 0 {
index = index_loc;
} else {
return index;
}
}
NextOrFail::Fail(_) => {
return index;
}
}
}
Line::Choice(ChoiceInstruction::TrustMe(_)) => {
return index;
}
Line::IndexingCode(indexing_code) => match &indexing_code[0] {
IndexingLine::Indexing(IndexingInstruction::SwitchOnTerm(_, v, ..)) => match v {
IndexingCodePtr::External(v) => {
index += v;
}
IndexingCodePtr::DynamicExternal(v) => match &code[index + v] {
&Line::Choice(ChoiceInstruction::DynamicInternalElse(
_,
_,
NextOrFail::Next(0),
)) => {
return index + v;
}
_ => {
index += v;
}
},
_ => unreachable!(),
},
_ => {
unreachable!();
}
},
Line::Control(ControlInstruction::RevJmpBy(offset)) => {
index -= offset;
}
_ => {
/* Here we land at the line after a TryMeElse(0),
* which happens iff a single clause belongs to the
* indexed subsequence. So, end the search by pointing
* to the original derelict TryMeElse.
*/
return index - 1;
}
}
}
}
fn remove_index_from_subsequence(
code: &mut Code,
opt_arg_index_key: &OptArgIndexKey,
clause_start: usize,
retraction_info: &mut RetractionInfo,
) {
if let Some(index_loc) = opt_arg_index_key.switch_on_term_loc() {
let clause_start = find_inner_choice_instr(code, clause_start, index_loc);
let target_indexing_line = to_indexing_line_mut(&mut code[index_loc]).unwrap();
let offset = clause_start - index_loc + 1;
remove_index(opt_arg_index_key, target_indexing_line, offset);
// TODO: this isn't sufficiently precise. The removed offset could
// appear anywhere inside an Internal record.
retraction_info.push_record(RetractionRecord::RemovedIndex(
index_loc,
opt_arg_index_key.clone(),
offset,
));
}
}
fn merge_indexed_subsequences(
code: &mut Code,
skeleton: &mut PredicateSkeleton,
lower_upper_bound: usize,
upper_lower_bound: usize,
retraction_info: &mut RetractionInfo,
) -> Option<IndexPtr> {
// patch the inner-threaded choice instructions to link the
// two sequences, patch lower_bound's outer-threaded choice
// instruction to TrustMe (or RetryMeElse), and derelict-ize
// target_pos + 1's inner TryMeElse.
let inner_trust_me_loc = skeleton.clauses[upper_lower_bound - 2].clause_start;
let inner_try_me_else_loc = find_inner_choice_instr(
code,
skeleton.clauses[upper_lower_bound].clause_start,
skeleton.clauses[upper_lower_bound]
.opt_arg_index_key
.switch_on_term_loc()
.unwrap(),
);
match &mut code[inner_try_me_else_loc] {
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) => {
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(
inner_try_me_else_loc,
*o,
));
match *o {
0 => {
code[inner_try_me_else_loc] = Line::Choice(ChoiceInstruction::TrustMe(0));
}
o => match &code[inner_try_me_else_loc + o] {
Line::Control(ControlInstruction::RevJmpBy(0)) => {
code[inner_try_me_else_loc] = Line::Choice(ChoiceInstruction::TrustMe(o));
}
_ => {
code[inner_try_me_else_loc] =
Line::Choice(ChoiceInstruction::RetryMeElse(o));
}
},
}
}
_ => {}
}
thread_choice_instr_at_to(
code,
inner_trust_me_loc,
inner_try_me_else_loc,
retraction_info,
);
let mut end_of_upper_lower_bound = None;
for index in upper_lower_bound..skeleton.clauses.len() {
if !skeleton.clauses[index].opt_arg_index_key.is_some() {
end_of_upper_lower_bound = Some(index);
break;
}
}
let outer_threaded_choice_instr_loc = skeleton.clauses[lower_upper_bound].clause_start - 2;
match end_of_upper_lower_bound {
Some(outer_threaded_clause_index) => {
thread_choice_instr_at_to(
code,
outer_threaded_choice_instr_loc,
skeleton.clauses[outer_threaded_clause_index].clause_start,
retraction_info,
);
}
None => match &mut code[outer_threaded_choice_instr_loc] {
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) => {
retraction_info
.push_record(RetractionRecord::ModifiedTryMeElse(inner_trust_me_loc, *o));
*o = 0;
return Some(IndexPtr::Index(outer_threaded_choice_instr_loc + 1));
}
_ => {}
},
}
None
}
fn delete_from_skeleton(
compilation_target: CompilationTarget,
key: PredicateKey,
skeleton: &mut PredicateSkeleton,
target_pos: usize,
retraction_info: &mut RetractionInfo,
) -> usize {
let clause_index_info = skeleton.clauses.remove(target_pos);
let clause_clause_loc = skeleton.core.clause_clause_locs.remove(target_pos);
if target_pos < skeleton.core.clause_assert_margin {
skeleton.core.clause_assert_margin -= 1;
}
retraction_info.push_record(RetractionRecord::RemovedSkeletonClause(
compilation_target,
key,
target_pos,
clause_index_info,
clause_clause_loc,
));
clause_clause_loc
}
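/// Editorial note (added, hedged): "blunting" rewrites the first surviving
/// choice instruction of a clause sequence so execution can begin there:
/// `RetryMeElse(o)` becomes `TryMeElse(o)`, `TrustMe(o)` becomes a derelict
/// `TryMeElse(0)`, and dynamic `Fail` branches become `Next(0)`, with every
/// change recorded for retraction. The returned location is where the blunted
/// sequence now starts.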
fn blunt_leading_choice_instr(
code: &mut Code,
mut instr_loc: usize,
retraction_info: &mut RetractionInfo,
) -> usize {
loop {
match &mut code[instr_loc] {
Line::Choice(ChoiceInstruction::RetryMeElse(o)) => {
retraction_info.push_record(RetractionRecord::ModifiedRetryMeElse(instr_loc, *o));
code[instr_loc] = Line::Choice(ChoiceInstruction::TryMeElse(*o));
return instr_loc;
}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(_)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Next(_))) => {
return instr_loc;
}
&mut Line::Choice(ChoiceInstruction::DynamicElse(b, d, NextOrFail::Fail(o))) => {
retraction_info.push_record(RetractionRecord::AppendedNextOrFail(
instr_loc,
NextOrFail::Fail(o),
));
code[instr_loc] =
Line::Choice(ChoiceInstruction::DynamicElse(b, d, NextOrFail::Next(0)));
return instr_loc;
}
&mut Line::Choice(ChoiceInstruction::DynamicInternalElse(
b,
d,
NextOrFail::Fail(o),
)) => {
retraction_info.push_record(RetractionRecord::AppendedNextOrFail(
instr_loc,
NextOrFail::Fail(o),
));
code[instr_loc] = Line::Choice(ChoiceInstruction::DynamicInternalElse(
b,
d,
NextOrFail::Next(0),
));
return instr_loc;
}
Line::Choice(ChoiceInstruction::TrustMe(o)) => {
retraction_info
.push_record(RetractionRecord::AppendedTrustMe(instr_loc, *o, false));
code[instr_loc] = Line::Choice(ChoiceInstruction::TryMeElse(0));
return instr_loc + 1;
}
Line::Choice(ChoiceInstruction::TryMeElse(0)) => {
return instr_loc + 1;
}
Line::Choice(ChoiceInstruction::TryMeElse(o)) => {
instr_loc += *o;
}
Line::Control(ControlInstruction::RevJmpBy(o)) => {
instr_loc -= *o;
}
_ => {
unreachable!()
}
}
}
}
fn set_switch_var_offset_to_choice_instr(
code: &mut Code,
index_loc: usize,
offset: usize,
retraction_info: &mut RetractionInfo,
) {
let target_indexing_line = to_indexing_line_mut(&mut code[index_loc]).unwrap();
let v = match &target_indexing_line[0] {
&IndexingLine::Indexing(IndexingInstruction::SwitchOnTerm(_, v, ..)) => match v {
IndexingCodePtr::External(v) | IndexingCodePtr::DynamicExternal(v) => v,
_ => unreachable!(),
},
_ => {
unreachable!();
}
};
match &code[index_loc + v] {
Line::Choice(ChoiceInstruction::TryMeElse(_))
| Line::Choice(ChoiceInstruction::DynamicElse(..))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(..)) => {}
_ => {
set_switch_var_offset(code, index_loc, offset, retraction_info);
}
}
}
#[inline]
fn set_switch_var_offset(
code: &mut Code,
index_loc: usize,
offset: usize,
retraction_info: &mut RetractionInfo,
) {
let target_indexing_line = to_indexing_line_mut(&mut code[index_loc]).unwrap();
let old_v = match &mut target_indexing_line[0] {
IndexingLine::Indexing(IndexingInstruction::SwitchOnTerm(_, ref mut v, ..)) => match *v {
IndexingCodePtr::DynamicExternal(_) => {
mem::replace(v, IndexingCodePtr::DynamicExternal(offset))
}
IndexingCodePtr::External(_) => mem::replace(v, IndexingCodePtr::External(offset)),
_ => unreachable!(),
},
_ => {
unreachable!()
}
};
retraction_info.push_record(RetractionRecord::ReplacedSwitchOnTermVarIndex(
index_loc, old_v,
));
}
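/// Editorial note (added, hedged): "internalizing" demotes the head of a
/// clause sequence into a continuation so a newly prepended clause can take
/// over as the head: `TryMeElse` becomes `RetryMeElse` (or `TrustMe` when
/// nothing follows), and the dynamic `Else` variants swap a terminal `Next`
/// offset for a `Fail` one, again recording each change for retraction.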
fn internalize_choice_instr_at(
code: &mut Code,
instr_loc: usize,
retraction_info: &mut RetractionInfo,
) {
match &mut code[instr_loc] {
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Fail(_)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Fail(_))) => {}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, ref mut o @ NextOrFail::Next(0))) => {
retraction_info.push_record(RetractionRecord::ReplacedDynamicElseOffset(instr_loc, 0));
*o = NextOrFail::Fail(0);
}
&mut Line::Choice(ChoiceInstruction::DynamicElse(b, d, NextOrFail::Next(o))) => {
retraction_info.push_record(RetractionRecord::AppendedNextOrFail(
instr_loc,
NextOrFail::Next(o),
));
match &mut code[instr_loc + o] {
Line::Control(ControlInstruction::RevJmpBy(p)) if *p == 0 => {
code[instr_loc] =
Line::Choice(ChoiceInstruction::DynamicElse(b, d, NextOrFail::Fail(o)));
}
_ => {
code[instr_loc] =
Line::Choice(ChoiceInstruction::DynamicElse(b, d, NextOrFail::Next(o)));
}
}
}
Line::Choice(ChoiceInstruction::DynamicInternalElse(
_,
_,
ref mut o @ NextOrFail::Next(0),
)) => {
retraction_info.push_record(RetractionRecord::ReplacedDynamicElseOffset(instr_loc, 0));
*o = NextOrFail::Fail(0);
}
&mut Line::Choice(ChoiceInstruction::DynamicInternalElse(b, d, NextOrFail::Next(o))) => {
retraction_info.push_record(RetractionRecord::ReplacedDynamicElseOffset(instr_loc, o));
match &mut code[instr_loc + o] {
Line::Control(ControlInstruction::RevJmpBy(p)) if *p == 0 => {
code[instr_loc] = Line::Choice(ChoiceInstruction::DynamicInternalElse(
b,
d,
NextOrFail::Fail(o),
));
}
_ => {
code[instr_loc] = Line::Choice(ChoiceInstruction::DynamicInternalElse(
b,
d,
NextOrFail::Next(o),
));
}
}
}
Line::Choice(ChoiceInstruction::TryMeElse(0)) => {
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(instr_loc, 0));
code[instr_loc] = Line::Choice(ChoiceInstruction::TrustMe(0));
}
Line::Choice(ChoiceInstruction::TryMeElse(o)) => {
let o = *o;
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(instr_loc, o));
match &mut code[instr_loc + o] {
Line::Control(ControlInstruction::RevJmpBy(p)) if *p == 0 => {
code[instr_loc] = Line::Choice(ChoiceInstruction::TrustMe(o));
}
_ => {
code[instr_loc] = Line::Choice(ChoiceInstruction::RetryMeElse(o));
}
}
}
_ => {
unreachable!();
}
}
}
fn thread_choice_instr_at_to(
code: &mut Code,
mut instr_loc: usize,
target_loc: usize,
retraction_info: &mut RetractionInfo,
) {
loop {
match &mut code[instr_loc] {
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o))
| Line::Choice(ChoiceInstruction::RetryMeElse(ref mut o))
if target_loc >= instr_loc =>
{
retraction_info.push_record(RetractionRecord::ReplacedChoiceOffset(instr_loc, *o));
*o = target_loc - instr_loc;
return;
}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(ref mut o)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(
_,
_,
NextOrFail::Next(ref mut o),
)) if target_loc >= instr_loc => {
retraction_info
.push_record(RetractionRecord::ReplacedDynamicElseOffset(instr_loc, *o));
*o = target_loc - instr_loc;
return;
}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Next(o)))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Next(o))) => {
instr_loc += *o;
}
Line::Choice(ChoiceInstruction::TryMeElse(o))
| Line::Choice(ChoiceInstruction::RetryMeElse(o)) => {
instr_loc += *o;
}
Line::Control(ControlInstruction::RevJmpBy(ref mut o)) if instr_loc >= target_loc => {
retraction_info.push_record(RetractionRecord::ModifiedRevJmpBy(instr_loc, *o));
*o = instr_loc - target_loc;
return;
}
&mut Line::Control(ControlInstruction::RevJmpBy(o)) => {
instr_loc -= o;
}
&mut Line::Choice(ChoiceInstruction::DynamicElse(birth, death, ref mut fail))
if target_loc >= instr_loc =>
{
retraction_info.push_record(RetractionRecord::AppendedNextOrFail(instr_loc, *fail));
code[instr_loc] = Line::Choice(ChoiceInstruction::DynamicElse(
birth,
death,
NextOrFail::Next(target_loc - instr_loc),
));
return;
}
Line::Choice(ChoiceInstruction::DynamicElse(_, _, NextOrFail::Fail(o))) if *o > 0 => {
instr_loc += *o;
}
&mut Line::Choice(ChoiceInstruction::DynamicInternalElse(
birth,
death,
ref mut fail,
)) if target_loc >= instr_loc => {
retraction_info.push_record(RetractionRecord::AppendedNextOrFail(instr_loc, *fail));
code[instr_loc] = Line::Choice(ChoiceInstruction::DynamicInternalElse(
birth,
death,
NextOrFail::Next(target_loc - instr_loc),
));
return;
}
Line::Choice(ChoiceInstruction::DynamicInternalElse(_, _, NextOrFail::Fail(o)))
if *o > 0 =>
{
instr_loc += *o;
}
Line::Choice(ChoiceInstruction::TrustMe(ref mut o)) if target_loc >= instr_loc => {
retraction_info.push_record(
RetractionRecord::AppendedTrustMe(instr_loc, *o, false),
//choice_instr.is_default()),
);
code[instr_loc] =
Line::Choice(ChoiceInstruction::RetryMeElse(target_loc - instr_loc));
return;
}
Line::Choice(ChoiceInstruction::TrustMe(o)) if *o > 0 => {
instr_loc += *o;
}
_ => {
unreachable!()
}
}
}
}
fn remove_non_leading_clause(
code: &mut Code,
preceding_choice_instr_loc: usize,
non_indexed_choice_instr_loc: usize,
retraction_info: &mut RetractionInfo,
) -> Option<IndexPtr> {
match &mut code[non_indexed_choice_instr_loc] {
Line::Choice(ChoiceInstruction::RetryMeElse(ref mut o)) => {
let o = *o;
thread_choice_instr_at_to(
code,
preceding_choice_instr_loc,
non_indexed_choice_instr_loc + o,
retraction_info,
);
None
}
Line::Choice(ChoiceInstruction::TrustMe(_)) => {
match &mut code[preceding_choice_instr_loc] {
Line::Choice(ChoiceInstruction::RetryMeElse(o)) => {
retraction_info.push_record(RetractionRecord::ModifiedRetryMeElse(
preceding_choice_instr_loc,
*o,
));
code[preceding_choice_instr_loc] = Line::Choice(ChoiceInstruction::TrustMe(0));
None
}
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) => {
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(
preceding_choice_instr_loc,
*o,
));
*o = 0;
Some(IndexPtr::Index(preceding_choice_instr_loc + 1))
}
_ => {
unreachable!();
}
}
}
_ => {
unreachable!();
}
}
}
fn finalize_retract(
key: PredicateKey,
compilation_target: CompilationTarget,
skeleton: &mut PredicateSkeleton,
code_index: CodeIndex,
target_pos: usize,
index_ptr_opt: Option<IndexPtr>,
retraction_info: &mut RetractionInfo,
) -> usize {
let clause_clause_loc = delete_from_skeleton(
compilation_target.clone(),
key.clone(),
skeleton,
target_pos,
retraction_info,
);
if let Some(index_ptr) = index_ptr_opt {
set_code_index(
retraction_info,
&compilation_target,
key,
&code_index,
index_ptr,
);
}
clause_clause_loc
}
fn remove_leading_unindexed_clause(
code: &mut Code,
non_indexed_choice_instr_loc: usize,
retraction_info: &mut RetractionInfo,
) -> Option<IndexPtr> {
match &mut code[non_indexed_choice_instr_loc] {
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) => {
if *o > 0 {
retraction_info.push_record(RetractionRecord::ModifiedTryMeElse(
non_indexed_choice_instr_loc,
*o,
));
let o = mem::replace(o, 0);
let index_ptr = blunt_leading_choice_instr(
code,
non_indexed_choice_instr_loc + o,
retraction_info,
);
Some(IndexPtr::Index(index_ptr))
} else {
Some(IndexPtr::DynamicUndefined)
}
}
_ => {
unreachable!();
}
}
}
fn find_dynamic_outer_choice_instr(code: &Code, index_loc: usize) -> usize {
match &code[index_loc] {
Line::IndexingCode(indexing_code) => match &indexing_code[0] {
&IndexingLine::Indexing(IndexingInstruction::SwitchOnTerm(
_,
IndexingCodePtr::DynamicExternal(v),
..,
)) => index_loc + v - 2,
_ => unreachable!(),
},
_ => unreachable!(),
}
}
fn prepend_compiled_clause(
code: &mut Code,
compilation_target: CompilationTarget,
key: PredicateKey,
mut clause_code: Code,
skeleton: &mut PredicateSkeleton,
retraction_info: &mut RetractionInfo,
global_clock_tick: usize,
) -> IndexPtr {
let clause_loc = code.len();
let mut prepend_queue = sdeq![];
let target_arg_num = skeleton.clauses[0].opt_arg_index_key.arg_num();
let head_arg_num = skeleton.clauses[1].opt_arg_index_key.arg_num();
let settings = CodeGenSettings {
global_clock_tick: if skeleton.core.is_dynamic {
Some(global_clock_tick)
} else {
None
},
is_extensible: true,
non_counted_bt: false,
};
let clause_loc = if skeleton.clauses[0]
.opt_arg_index_key
.switch_on_term_loc()
.is_some()
{
match skeleton.clauses[1].opt_arg_index_key.switch_on_term_loc() {
Some(index_loc) if target_arg_num == head_arg_num => {
prepend_queue.extend(clause_code.drain(3..));
skeleton.clauses[0].opt_arg_index_key += index_loc - 1;
skeleton.clauses[0].clause_start = clause_loc + 2;
retraction_info.push_record(RetractionRecord::AddedIndex(
skeleton.clauses[0].opt_arg_index_key.clone(),
skeleton.clauses[0].clause_start,
));
let outer_thread_choice_loc = if skeleton.core.is_dynamic {
find_dynamic_outer_choice_instr(code, index_loc)
} else {
skeleton.clauses[1].clause_start - 2
};
retraction_info.push_record(RetractionRecord::SkeletonClauseStartReplaced(
compilation_target,
key.clone(),
1,
skeleton.clauses[1].clause_start,
));
skeleton.clauses[1].clause_start =
find_inner_choice_instr(code, skeleton.clauses[1].clause_start, index_loc);
let inner_thread_rev_offset =
3 + prepend_queue.len() + clause_loc - skeleton.clauses[1].clause_start;
prepend_queue.push_back(Line::Control(ControlInstruction::RevJmpBy(
inner_thread_rev_offset,
)));
prepend_queue.push_front(Line::Choice(
settings.internal_try_me_else(prepend_queue.len()),
));
// prepend_queue is now:
// | TryMeElse N_2
// | (clause_code)
// +N_2 | RevJmpBy (RetryMeElse(M_1) or TryMeElse(0) at index_loc + 1)
prepend_queue.push_front(Line::Control(ControlInstruction::RevJmpBy(
1 + clause_loc - index_loc,
)));
let outer_thread_choice_offset = // outer_thread_choice_loc WAS index_loc - 1..
match derelictize_try_me_else(code, outer_thread_choice_loc, retraction_info) {
Some(next_subseq_offset) => {
// skeleton.clauses[1] has a non-stub TryMeElse.
let outer_thread_rev_offset =
prepend_queue.len() + 1 + clause_loc - outer_thread_choice_loc -
next_subseq_offset;
prepend_queue.push_back(
Line::Control(ControlInstruction::RevJmpBy(outer_thread_rev_offset))
);
prepend_queue.len()
}
None => {
// This case occurs when the clauses of
// the host predicate, up to and including
// the prepending of this clause, are
// indexed.
// The outer TryMeElse / RevJmpBy pushed
// in this case are stub instructions
// awaiting the addition of unindexed
// clauses.
prepend_queue.push_back(
Line::Control(ControlInstruction::RevJmpBy(0)),
);
0
}
};
prepend_queue.push_front(Line::Choice(
settings.try_me_else(outer_thread_choice_offset),
));
// prepend_queue is now:
// | TryMeElse N_3
// | RevJmpBy (SwitchOnTerm at index_loc)
// | TryMeElse N_2
// | (clause_code)
// N_2 | RevJmpBy (RetryMeElse(M_1) or TryMeElse(0) at index_loc + 1)
// N_3 | RevJmpBy (TryMeElse(N_1) at index_loc - 1 or TrustMe if N_1 == 0)
let target_indexing_line = to_indexing_line_mut(&mut code[index_loc]).unwrap();
merge_clause_index(
target_indexing_line,
&mut skeleton.clauses,
clause_loc + 2, // == skeleton.clauses[0].clause_start
AppendOrPrepend::Prepend,
);
set_switch_var_offset(code, index_loc, clause_loc - index_loc + 2, retraction_info);
internalize_choice_instr_at(
code,
skeleton.clauses[1].clause_start,
retraction_info,
);
code.extend(prepend_queue.into_iter());
if skeleton.core.is_dynamic {
clause_loc
} else {
clause_loc + (outer_thread_choice_offset == 0) as usize
}
}
_ => {
prepend_queue.extend(clause_code.drain(1..));
skeleton.clauses[0].opt_arg_index_key += clause_loc;
skeleton.clauses[0].clause_start = clause_loc + 2;
let old_clause_start =
match skeleton.clauses[1].opt_arg_index_key.switch_on_term_loc() {
Some(index_loc) if skeleton.core.is_dynamic => {
find_dynamic_outer_choice_instr(code, index_loc)
}
Some(_) => skeleton.clauses[1].clause_start - 2,
None => skeleton.clauses[1].clause_start,
};
let inner_thread_rev_offset =
2 + prepend_queue.len() + clause_loc - old_clause_start;
// this is a stub for chaining inner-threaded choice
// instructions.
prepend_queue.push_back(Line::Control(ControlInstruction::RevJmpBy(0)));
let prepend_queue_len = prepend_queue.len();
match &mut prepend_queue[1] {
Line::Choice(ChoiceInstruction::TryMeElse(ref mut o)) if *o == 0 => {
*o = prepend_queue_len - 2;
}
Line::Choice(ChoiceInstruction::DynamicInternalElse(
_,
_,
ref mut o @ NextOrFail::Next(0),
)) => {
*o = NextOrFail::Fail(prepend_queue_len - 2);
}
_ => {
unreachable!();
}
}
prepend_queue.push_back(Line::Control(ControlInstruction::RevJmpBy(
inner_thread_rev_offset,
)));
prepend_queue.push_front(Line::Choice(settings.try_me_else(prepend_queue.len())));
// prepend_queue is now:
// | TryMeElse(N_2)
// | SwitchOnTerm 2, ...
// | TryMeElse(0)
// | (clause_code)
// +N_2 | RevJmpBy (RetryMeElse(M_1))
internalize_choice_instr_at(code, old_clause_start, retraction_info);
code.extend(prepend_queue.into_iter());
clause_loc // + (outer_thread_choice_offset == 0 as usize)
}
}
} else {
match skeleton.clauses[1].opt_arg_index_key.switch_on_term_loc() {
Some(index_loc) => {
prepend_queue.extend(clause_code.drain(1..));
let old_clause_start = if skeleton.core.is_dynamic {
find_dynamic_outer_choice_instr(code, index_loc)
} else {
skeleton.clauses[1].clause_start - 2
};
let inner_thread_rev_offset =
1 + prepend_queue.len() + clause_loc - old_clause_start;
prepend_queue.push_back(Line::Control(ControlInstruction::RevJmpBy(
inner_thread_rev_offset,
)));
prepend_queue.push_front(Line::Choice(settings.try_me_else(prepend_queue.len())));
// prepend_queue is now:
// | TryMeElse(N_2)
// | (clause_code)
// +N_2 | RevJmpBy (RetryMeElse(M_1))
internalize_choice_instr_at(code, old_clause_start, retraction_info);
code.extend(prepend_queue.into_iter());
// skeleton.clauses[0].opt_arg_index_key += clause_loc;
skeleton.clauses[0].clause_start = clause_loc;
clause_loc // + (outer_thread_choice_offset == 0 as usize)
}
None => {
prepend_queue.extend(clause_code.drain(1..));
let old_clause_start = skeleton.clauses[1].clause_start;
let inner_thread_rev_offset =
1 + prepend_queue.len() + clause_loc - old_clause_start;
prepend_queue.push_back(Line::Control(ControlInstruction::RevJmpBy(
inner_thread_rev_offset,
)));
prepend_queue.push_front(Line::Choice(settings.try_me_else(prepend_queue.len())));
// prepend_queue is now:
// | TryMeElse(N_2)
// | (clause_code)
// +N_2 | RevJmpBy (RetryMeElse(M_1))
internalize_choice_instr_at(code, old_clause_start, retraction_info);
code.extend(prepend_queue.into_iter());
// skeleton.clauses[0].opt_arg_index_key += clause_loc;
skeleton.clauses[0].clause_start = clause_loc;
clause_loc
}
}
};
if skeleton.core.is_dynamic {
IndexPtr::DynamicIndex(clause_loc)
} else {
IndexPtr::Index(clause_loc)
}
}
fn append_compiled_clause(
code: &mut Code,
mut clause_code: Code,
skeleton: &mut PredicateSkeleton,
retraction_info: &mut RetractionInfo,
global_clock_tick: usize,
) -> Option<IndexPtr> {
let clause_loc = code.len();
let target_pos = skeleton.clauses.len() - 1;
let lower_bound = lower_bound_of_target_clause(skeleton, target_pos);
let settings = CodeGenSettings {
global_clock_tick: if skeleton.core.is_dynamic {
Some(global_clock_tick)
} else {
None
},
is_extensible: true,
non_counted_bt: false,
};
skeleton.clauses[target_pos].clause_start = clause_loc;
let mut code_ptr_opt = None;
let lower_bound_arg_num = skeleton.clauses[lower_bound].opt_arg_index_key.arg_num();
let target_arg_num = skeleton.clauses[target_pos].opt_arg_index_key.arg_num();
let threaded_choice_instr_loc = match skeleton.clauses[lower_bound]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(index_loc) if lower_bound_arg_num == target_arg_num => {
code.push(Line::Choice(settings.internal_trust_me()));
code.extend(clause_code.drain(3..)); // skip the indexing code
// set skeleton[target_pos].opt_arg_index_key to
// index_loc. its original value is always 1.
skeleton.clauses[target_pos].opt_arg_index_key += index_loc - 1;
retraction_info.push_record(RetractionRecord::AddedIndex(
skeleton.clauses[target_pos].opt_arg_index_key.clone(),
skeleton.clauses[target_pos].clause_start,
));
let target_indexing_line = to_indexing_line_mut(&mut code[index_loc]).unwrap();
merge_clause_index(
target_indexing_line,
&mut skeleton.clauses[lower_bound..],
clause_loc,
AppendOrPrepend::Append,
);
let target_pos_clause_start = find_inner_choice_instr(
code,
skeleton.clauses[target_pos - 1].clause_start,
index_loc,
);
let target_pos_clause_start = find_outer_choice_instr(code, target_pos_clause_start);
if lower_bound + 1 == target_pos {
set_switch_var_offset_to_choice_instr(
code,
index_loc,
target_pos_clause_start - index_loc,
retraction_info,
);
if lower_bound == 0 && !skeleton.core.is_dynamic {
code_ptr_opt = Some(target_pos_clause_start);
}
}
target_pos_clause_start // skeleton.clauses[target_pos - 1].clause_start
}
_ => {
code.push(Line::Choice(settings.trust_me()));
skeleton.clauses[target_pos].opt_arg_index_key += clause_loc;
code.extend(clause_code.drain(1..));
match skeleton.clauses[target_pos]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(index_loc) => {
// point to the inner-threaded TryMeElse(0) if target_pos is
// indexed, and make switch_on_term point one line after it in
// its variable offset.
skeleton.clauses[target_pos].clause_start += 2;
if !skeleton.core.is_dynamic {
set_switch_var_offset(code, index_loc, 2, retraction_info);
}
}
None => {}
}
match skeleton.clauses[lower_bound]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(_) => {
if lower_bound == 0 {
code_ptr_opt = Some(skeleton.clauses[lower_bound].clause_start - 2);
}
find_outer_choice_instr(code, skeleton.clauses[lower_bound].clause_start - 2)
}
None => {
if lower_bound == 0 {
code_ptr_opt = Some(skeleton.clauses[lower_bound].clause_start);
}
find_outer_choice_instr(code, skeleton.clauses[lower_bound].clause_start)
}
}
}
};
thread_choice_instr_at_to(code, threaded_choice_instr_loc, clause_loc, retraction_info);
code_ptr_opt.map(|p| {
if skeleton.core.is_dynamic {
IndexPtr::DynamicIndex(p)
} else {
IndexPtr::Index(p)
}
})
}
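/// Illustrative example (editorial, not from the original source): with
/// argument-index keys `[arg 1, arg 2, arg 1]`, `lower_bound == 0`, and
/// `target_pos == 1`, retracting the middle clause leaves the two `arg 1`
/// subsequences adjacent, so this returns `true` and they can be merged.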
#[inline]
fn mergeable_indexed_subsequences(
lower_bound: usize,
target_pos: usize,
skeleton: &PredicateSkeleton,
) -> bool {
let lower_bound_arg_num = skeleton.clauses[lower_bound].opt_arg_index_key.arg_num();
if target_pos + 1 < skeleton.clauses.len() {
let succ_arg_num = skeleton.clauses[target_pos + 1].opt_arg_index_key.arg_num();
let target_arg_num = skeleton.clauses[target_pos].opt_arg_index_key.arg_num();
return target_arg_num != succ_arg_num && lower_bound_arg_num == succ_arg_num;
}
false
}
fn print_overwrite_warning(
compilation_target: &CompilationTarget,
code_ptr: IndexPtr,
key: &PredicateKey,
is_dynamic: bool,
) {
if let CompilationTarget::Module(ref module_name) = compilation_target {
match module_name.as_str() {
"builtins" | "loader" => return,
_ => {}
}
}
match code_ptr {
IndexPtr::DynamicUndefined | IndexPtr::Undefined => return,
_ if is_dynamic => return,
_ => {}
}
println!("Warning: overwriting {}/{}", key.0, key.1);
}
impl<'a> LoadState<'a> {
pub(super) fn listing_src_file_name(&self) -> Option<ClauseName> {
if let Some(load_context) = self.wam.load_contexts.last() {
if !load_context.path.is_file() {
return None;
}
if let Some(path_str) = load_context.path.to_str() {
if !path_str.is_empty() {
return Some(clause_name!(
path_str.to_string(),
self.wam.machine_st.atom_tbl
));
}
}
}
None
}
fn compile_standalone_clause(
&mut self,
term: Term,
settings: CodeGenSettings,
atom_tbl: TabledData<Atom>,
) -> Result<StandaloneCompileResult, SessionError> {
let mut preprocessor = Preprocessor::new();
let mut cg = CodeGenerator::<DebrayAllocator>::new(atom_tbl.clone(), settings);
let clause = self.try_term_to_tl(term, &mut preprocessor)?;
let queue = preprocessor.parse_queue(self)?;
let mut clause_code = cg.compile_predicate(&vec![clause])?;
compile_appendix(
&mut clause_code,
queue,
cg.jmp_by_locs,
settings.non_counted_bt,
atom_tbl,
)?;
Ok(StandaloneCompileResult {
clause_code,
standalone_skeleton: cg.skeleton,
})
}
fn compile(
&mut self,
key: PredicateKey,
predicates: &mut PredicateQueue,
settings: CodeGenSettings,
) -> Result<CodeIndex, SessionError> {
let code_index =
self.get_or_insert_code_index(key.clone(), predicates.compilation_target.clone());
let code_len = self.wam.code_repo.code.len();
let mut code_ptr = code_len;
let mut cg =
CodeGenerator::<DebrayAllocator>::new(self.wam.machine_st.atom_tbl.clone(), settings);
let mut clauses = vec![];
let mut preprocessor = Preprocessor::new();
for term in predicates.predicates.drain(0..) {
clauses.push(self.try_term_to_tl(term, &mut preprocessor)?);
}
let queue = preprocessor.parse_queue(self)?;
let mut code = cg.compile_predicate(&clauses)?;
compile_appendix(
&mut code,
queue,
cg.jmp_by_locs,
settings.non_counted_bt,
self.wam.machine_st.atom_tbl.clone(),
)?;
if settings.is_extensible {
let mut clause_clause_locs = sdeq![];
for clause_index_info in cg.skeleton.clauses.iter_mut() {
clause_index_info.clause_start += code_len;
clause_index_info.opt_arg_index_key += code_len;
clause_clause_locs.push_back(clause_index_info.clause_start);
}
match &mut code[0] {
Line::Choice(ChoiceInstruction::TryMeElse(0)) => {
code_ptr += 1;
}
_ => {}
}
match self
.wam
.indices
.get_predicate_skeleton_mut(&predicates.compilation_target, &key)
{
Some(skeleton) => {
self.retraction_info
.push_record(RetractionRecord::SkeletonClauseTruncateBack(
predicates.compilation_target.clone(),
key.clone(),
skeleton.clauses.len(),
));
skeleton.clauses.extend(cg.skeleton.clauses.into_iter());
skeleton
.core
.clause_clause_locs
.extend_from_slice(&clause_clause_locs[0..]);
}
None => {
cg.skeleton
.core
.clause_clause_locs
.extend_from_slice(&clause_clause_locs[0..]);
self.add_extensible_predicate(
key.clone(),
cg.skeleton,
predicates.compilation_target.clone(),
);
}
};
self.extend_local_predicate_skeleton(
&predicates.compilation_target,
&key,
clause_clause_locs,
);
}
print_overwrite_warning(
&predicates.compilation_target,
code_index.get(),
&key,
settings.is_dynamic(),
);
let index_ptr = if settings.is_dynamic() {
IndexPtr::DynamicIndex(code_ptr)
} else {
IndexPtr::Index(code_ptr)
};
set_code_index(
&mut self.retraction_info,
&predicates.compilation_target,
key,
&code_index,
index_ptr,
);
self.wam.code_repo.code.extend(code.into_iter());
Ok(code_index)
}
fn extend_local_predicate_skeleton(
&mut self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
clause_clause_locs: SliceDeque<usize>,
) {
match self.wam.indices.get_local_predicate_skeleton_mut(
self.compilation_target.clone(),
compilation_target.clone(),
self.listing_src_file_name(),
key.clone(),
) {
Some(skeleton) => {
self.retraction_info.push_record(
RetractionRecord::SkeletonLocalClauseTruncateBack(
self.compilation_target.clone(),
compilation_target.clone(),
key.clone(),
skeleton.clause_clause_locs.len(),
),
);
skeleton
.clause_clause_locs
.extend_from_slice(&clause_clause_locs[0..]);
}
None => {
let mut skeleton = LocalPredicateSkeleton::new();
skeleton.clause_clause_locs = clause_clause_locs;
self.add_local_extensible_predicate(
compilation_target.clone(),
key.clone(),
skeleton,
);
}
}
}
fn push_front_to_local_predicate_skeleton(
&mut self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
code_len: usize,
) {
match self.wam.indices.get_local_predicate_skeleton_mut(
self.compilation_target.clone(),
compilation_target.clone(),
self.listing_src_file_name(),
key.clone(),
) {
Some(skeleton) => {
self.retraction_info.push_record(
RetractionRecord::SkeletonLocalClauseClausePopFront(
self.compilation_target.clone(),
compilation_target.clone(),
key.clone(),
),
);
skeleton.clause_clause_locs.push_front(code_len);
}
None => {
let mut skeleton = LocalPredicateSkeleton::new();
skeleton.clause_clause_locs.push_front(code_len);
self.add_local_extensible_predicate(
compilation_target.clone(),
key.clone(),
skeleton,
);
}
}
}
fn push_back_to_local_predicate_skeleton(
&mut self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
code_len: usize,
) {
match self.wam.indices.get_local_predicate_skeleton_mut(
self.compilation_target.clone(),
compilation_target.clone(),
self.listing_src_file_name(),
key.clone(),
) {
Some(skeleton) => {
self.retraction_info.push_record(
RetractionRecord::SkeletonLocalClauseClausePopBack(
self.compilation_target.clone(),
compilation_target.clone(),
key.clone(),
),
);
skeleton.clause_clause_locs.push_back(code_len);
}
None => {
let mut skeleton = LocalPredicateSkeleton::new();
skeleton.clause_clause_locs.push_back(code_len);
self.add_local_extensible_predicate(
compilation_target.clone(),
key.clone(),
skeleton,
);
}
}
}
pub(super) fn incremental_compile_clause(
&mut self,
key: PredicateKey,
clause: Term,
compilation_target: CompilationTarget,
non_counted_bt: bool,
append_or_prepend: AppendOrPrepend,
) -> Result<CodeIndex, SessionError> {
let settings = match self
.wam
.indices
.get_predicate_skeleton_mut(&compilation_target, &key)
{
Some(skeleton) if !skeleton.clauses.is_empty() => CodeGenSettings {
global_clock_tick: if skeleton.core.is_dynamic {
Some(self.wam.machine_st.global_clock)
} else {
None
},
is_extensible: true,
non_counted_bt,
},
skeleton_opt => {
let settings = CodeGenSettings {
global_clock_tick: if let Some(skeleton) = skeleton_opt {
if skeleton.core.is_dynamic {
Some(self.wam.machine_st.global_clock)
} else {
None
}
} else {
None
},
is_extensible: true,
non_counted_bt,
};
let mut predicate_queue = predicate_queue![clause];
predicate_queue.compilation_target = compilation_target;
return self.compile(key, &mut predicate_queue, settings);
}
};
let atom_tbl = self.wam.machine_st.atom_tbl.clone();
let StandaloneCompileResult {
clause_code,
mut standalone_skeleton,
} = self.compile_standalone_clause(clause, settings, atom_tbl)?;
let code_len = self.wam.code_repo.code.len();
let skeleton = match self
.wam
.indices
.get_predicate_skeleton_mut(&compilation_target, &key)
{
Some(skeleton) if !skeleton.clauses.is_empty() => skeleton,
_ => unreachable!(),
};
match append_or_prepend {
AppendOrPrepend::Append => {
let clause_index_info = standalone_skeleton.clauses.pop_back().unwrap();
skeleton.clauses.push_back(clause_index_info);
skeleton.core.clause_clause_locs.push_back(code_len);
self.retraction_info
.push_record(RetractionRecord::SkeletonClausePopBack(
compilation_target.clone(),
key.clone(),
));
let result = append_compiled_clause(
&mut self.wam.code_repo.code,
clause_code,
skeleton,
&mut self.retraction_info,
self.wam.machine_st.global_clock,
);
self.push_back_to_local_predicate_skeleton(&compilation_target, &key, code_len);
let code_index =
self.get_or_insert_code_index(key.clone(), compilation_target.clone());
if let Some(new_code_ptr) = result {
set_code_index(
&mut self.retraction_info,
&compilation_target,
key,
&code_index,
new_code_ptr,
);
}
Ok(code_index)
}
AppendOrPrepend::Prepend => {
let clause_index_info = standalone_skeleton.clauses.pop_back().unwrap();
skeleton.clauses.push_front(clause_index_info);
skeleton.core.clause_clause_locs.push_front(code_len);
skeleton.core.clause_assert_margin += 1;
self.retraction_info
.push_record(RetractionRecord::SkeletonClausePopFront(
compilation_target.clone(),
key.clone(),
));
let new_code_ptr = prepend_compiled_clause(
&mut self.wam.code_repo.code,
compilation_target.clone(),
key.clone(),
clause_code,
skeleton,
&mut self.retraction_info,
self.wam.machine_st.global_clock,
);
self.push_front_to_local_predicate_skeleton(&compilation_target, &key, code_len);
let code_index =
self.get_or_insert_code_index(key.clone(), compilation_target.clone());
set_code_index(
&mut self.retraction_info,
&compilation_target,
key,
&code_index,
new_code_ptr,
);
Ok(code_index)
}
}
}
pub(super) fn retract_dynamic_clause(&mut self, key: PredicateKey, target_pos: usize) -> usize {
let skeleton = match self
.wam
.indices
.get_predicate_skeleton_mut(&self.compilation_target, &key)
{
Some(skeleton) => skeleton,
None => {
unreachable!();
}
};
let clause_loc = match skeleton.clauses[target_pos]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(index_loc) => find_inner_choice_instr(
&self.wam.code_repo.code,
skeleton.clauses[target_pos].clause_start,
index_loc,
),
None => skeleton.clauses[target_pos].clause_start,
};
match &mut self.wam.code_repo.code[clause_loc] {
Line::Choice(ChoiceInstruction::DynamicElse(_, ref mut d, _))
| Line::Choice(ChoiceInstruction::DynamicInternalElse(_, ref mut d, _)) => {
*d = Death::Finite(self.wam.machine_st.global_clock);
}
_ => unreachable!(),
}
delete_from_skeleton(
self.compilation_target.clone(),
key,
skeleton,
target_pos,
&mut self.retraction_info,
)
}
pub(super) fn retract_clause(&mut self, key: PredicateKey, target_pos: usize) -> usize {
let code_index =
self.get_or_insert_code_index(key.clone(), self.compilation_target.clone());
let skeleton = match self
.wam
.indices
.get_predicate_skeleton_mut(&self.compilation_target, &key)
{
Some(skeleton) => skeleton,
None => {
unreachable!();
}
};
let code = &mut self.wam.code_repo.code;
let lower_bound = lower_bound_of_target_clause(skeleton, target_pos);
let lower_bound_is_unindexed = !skeleton.clauses[lower_bound].opt_arg_index_key.is_some();
if target_pos == 0 || (lower_bound + 1 == target_pos && lower_bound_is_unindexed) {
// the clause preceding target_pos, if there is one, is of key type
// OptArgIndexKey::None.
match skeleton.clauses[target_pos]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(index_loc) => {
let inner_clause_start = find_inner_choice_instr(
code,
skeleton.clauses[target_pos].clause_start,
index_loc,
);
remove_index_from_subsequence(
code,
&skeleton.clauses[target_pos].opt_arg_index_key,
inner_clause_start,
&mut self.retraction_info,
);
match derelictize_try_me_else(
code,
inner_clause_start,
&mut self.retraction_info,
) {
Some(offset) => {
let instr_loc = find_inner_choice_instr(
code,
inner_clause_start + offset,
index_loc,
);
let clause_loc = blunt_leading_choice_instr(
code,
instr_loc,
&mut self.retraction_info,
);
set_switch_var_offset(
code,
index_loc,
clause_loc - index_loc,
&mut self.retraction_info,
);
self.retraction_info.push_record(
RetractionRecord::SkeletonClauseStartReplaced(
self.compilation_target.clone(),
key.clone(),
target_pos + 1,
skeleton.clauses[target_pos + 1].clause_start,
),
);
skeleton.clauses[target_pos + 1].clause_start =
skeleton.clauses[target_pos].clause_start;
let index_ptr_opt = if target_pos == 0 {
Some(IndexPtr::Index(clause_loc))
} else {
None
};
return finalize_retract(
key,
self.compilation_target.clone(),
skeleton,
code_index,
target_pos,
index_ptr_opt,
&mut self.retraction_info,
);
}
None => {
let index_ptr_opt = if target_pos > 0 {
let preceding_choice_instr_loc =
skeleton.clauses[target_pos - 1].clause_start;
remove_non_leading_clause(
code,
preceding_choice_instr_loc,
skeleton.clauses[target_pos].clause_start - 2,
&mut self.retraction_info,
)
} else {
remove_leading_unindexed_clause(
code,
skeleton.clauses[target_pos].clause_start - 2,
&mut self.retraction_info,
)
};
return finalize_retract(
key,
self.compilation_target.clone(),
skeleton,
code_index,
target_pos,
index_ptr_opt,
&mut self.retraction_info,
);
}
}
}
None => {}
}
}
let index_ptr_opt = match skeleton.clauses[lower_bound]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(target_indexing_loc)
if mergeable_indexed_subsequences(lower_bound, target_pos, skeleton) =>
{
let lower_bound_clause_start = find_inner_choice_instr(
code,
skeleton.clauses[lower_bound].clause_start,
target_indexing_loc,
);
let result;
match skeleton.clauses[target_pos + 1]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(later_indexing_loc) if later_indexing_loc < target_indexing_loc => {
let target_indexing_line = mem::replace(
&mut code[target_indexing_loc],
Line::Control(ControlInstruction::RevJmpBy(
target_indexing_loc - later_indexing_loc,
)),
);
match target_indexing_line {
Line::IndexingCode(indexing_code) => {
self.retraction_info.push_record(
RetractionRecord::ReplacedIndexingLine(
target_indexing_loc,
indexing_code,
),
);
}
_ => {}
}
result = merge_indexed_subsequences(
code,
skeleton,
lower_bound,
target_pos + 1,
&mut self.retraction_info,
);
merge_indices(
code,
later_indexing_loc,
0..target_pos - lower_bound,
&mut skeleton.clauses[lower_bound..],
&mut self.retraction_info,
);
set_switch_var_offset(
code,
later_indexing_loc,
lower_bound_clause_start - later_indexing_loc,
&mut self.retraction_info,
);
}
_ => {
result = merge_indexed_subsequences(
code,
skeleton,
lower_bound,
target_pos + 1,
&mut self.retraction_info,
);
merge_indices(
code,
target_indexing_loc,
target_pos + 1 - lower_bound..skeleton.clauses.len() - lower_bound,
&mut skeleton.clauses[lower_bound..],
&mut self.retraction_info,
);
set_switch_var_offset_to_choice_instr(
code,
target_indexing_loc,
lower_bound_clause_start - target_indexing_loc,
&mut self.retraction_info,
);
}
};
result
}
_ => {
if target_pos > 0 {
remove_index_from_subsequence(
code,
&skeleton.clauses[target_pos].opt_arg_index_key,
skeleton.clauses[target_pos].clause_start,
&mut self.retraction_info,
);
match skeleton.clauses[target_pos]
.opt_arg_index_key
.switch_on_term_loc()
{
Some(index_loc) => {
let preceding_choice_instr_loc = find_inner_choice_instr(
code,
skeleton.clauses[target_pos - 1].clause_start,
index_loc,
);
remove_non_leading_clause(
code,
preceding_choice_instr_loc,
skeleton.clauses[target_pos].clause_start,
&mut self.retraction_info,
);
match &mut code[preceding_choice_instr_loc] {
Line::Choice(ChoiceInstruction::TryMeElse(0)) => {
set_switch_var_offset(
code,
index_loc,
preceding_choice_instr_loc + 1 - index_loc,
&mut self.retraction_info,
);
}
_ => {}
}
None
}
None => {
let preceding_choice_instr_loc =
if skeleton.clauses[lower_bound].opt_arg_index_key.is_some() {
skeleton.clauses[lower_bound].clause_start - 2
} else {
skeleton.clauses[lower_bound].clause_start
};
remove_non_leading_clause(
code,
preceding_choice_instr_loc,
skeleton.clauses[target_pos].clause_start,
&mut self.retraction_info,
)
}
}
} else {
remove_leading_unindexed_clause(
code,
skeleton.clauses[target_pos].clause_start,
&mut self.retraction_info,
)
}
}
};
finalize_retract(
key,
self.compilation_target.clone(),
skeleton,
code_index,
target_pos,
index_ptr_opt,
&mut self.retraction_info,
)
}
}
impl<'a, TS: TermStream> Loader<'a, TS> {
pub(super) fn compile_clause_clauses<ClauseIter: Iterator<Item = (Term, Term)>>(
&mut self,
key: PredicateKey,
compilation_target: CompilationTarget,
clause_clauses: ClauseIter,
append_or_prepend: AppendOrPrepend,
) -> Result<(), SessionError> {
let clause_predicates = clause_clauses.map(|(head, body)| {
Term::Clause(
Cell::default(),
clause_name!("$clause"),
vec![Box::new(head), Box::new(body)],
None,
)
});
let clause_clause_compilation_target = match compilation_target {
CompilationTarget::User => CompilationTarget::Module(clause_name!("builtins")),
_ => compilation_target.clone(),
};
let mut num_clause_predicates = 0;
for clause_term in clause_predicates {
self.load_state.incremental_compile_clause(
(clause_name!("$clause"), 2),
clause_term,
clause_clause_compilation_target.clone(),
false, // non_counted_bt is false.
append_or_prepend,
)?;
num_clause_predicates += 1;
}
let locs_vec: Vec<_> = match self
.load_state
.wam
.indices
.get_predicate_skeleton_mut(&compilation_target, &key)
{
Some(skeleton) if append_or_prepend.is_append() => {
let tail_num = skeleton.core.clause_clause_locs.len() - num_clause_predicates;
skeleton.core.clause_clause_locs[tail_num..]
.iter()
.cloned()
.collect()
}
Some(skeleton) => skeleton.core.clause_clause_locs[0..num_clause_predicates]
.iter()
.cloned()
.collect(),
None => {
unreachable!()
}
};
match self.load_state.wam.indices.get_predicate_skeleton_mut(
&clause_clause_compilation_target,
&(clause_name!("$clause"), 2),
) {
Some(skeleton) if append_or_prepend.is_append() => {
for _ in 0..num_clause_predicates {
skeleton.core.clause_clause_locs.pop_back();
}
for loc in locs_vec {
skeleton.core.clause_clause_locs.push_back(loc);
}
}
Some(skeleton) => {
for _ in 0..num_clause_predicates {
skeleton.core.clause_clause_locs.pop_front();
}
for loc in locs_vec.into_iter().rev() {
skeleton.core.clause_clause_locs.push_front(loc);
}
}
None => {
unreachable!();
}
}
Ok(())
}
pub(super) fn compile_and_submit(&mut self) -> Result<(), SessionError> {
let key = self
.predicates
.first()
.and_then(|cl| {
let arity = ClauseInfo::arity(cl);
ClauseInfo::name(cl).map(|name| (name, arity))
})
.ok_or(SessionError::NamelessEntry)?;
let mut predicate_info = self
.load_state
.wam
.indices
.get_predicate_skeleton(&self.predicates.compilation_target, &key)
.map(|skeleton| skeleton.predicate_info())
.unwrap_or_default();
let local_predicate_info = self
.load_state
.wam
.indices
.get_local_predicate_skeleton(
self.load_state.compilation_target.clone(),
self.predicates.compilation_target.clone(),
self.load_state.listing_src_file_name(),
key.clone(),
)
.map(|skeleton| skeleton.predicate_info())
.unwrap_or_default();
if local_predicate_info.must_retract_local_clauses() {
self.retract_local_clauses(&key, predicate_info.is_dynamic);
}
let do_incremental_compile =
if self.load_state.compilation_target == self.predicates.compilation_target {
predicate_info.compile_incrementally()
} else {
local_predicate_info.is_multifile && predicate_info.compile_incrementally()
};
let predicates_len = self.predicates.len();
let non_counted_bt = self.non_counted_bt_preds.contains(&key);
if do_incremental_compile {
for term in self.predicates.predicates.drain(0..) {
self.load_state.incremental_compile_clause(
key.clone(),
term,
self.predicates.compilation_target.clone(),
non_counted_bt,
AppendOrPrepend::Append,
)?;
}
} else {
if self.load_state.compilation_target != self.predicates.compilation_target {
if !local_predicate_info.is_extensible {
if predicate_info.is_multifile {
println!(
"Warning: overwriting multifile predicate {}:{}/{} because \
it was not locally declared multifile.",
self.predicates.compilation_target, key.0, key.1
);
}
if let Some(skeleton) = self
.load_state
.wam
.indices
.remove_predicate_skeleton(&self.predicates.compilation_target, &key)
{
if predicate_info.is_dynamic {
let clause_clause_compilation_target =
match &self.predicates.compilation_target {
CompilationTarget::User => {
CompilationTarget::Module(clause_name!("builtins"))
}
module => module.clone(),
};
self.load_state.retract_local_clauses_by_locs(
clause_clause_compilation_target,
(clause_name!("$clause"), 2),
(0..skeleton.clauses.len()).map(Some).collect(),
false, // the builtin M:'$clause'/2 is never dynamic.
);
predicate_info.is_dynamic = false;
}
self.load_state.retraction_info.push_record(
RetractionRecord::RemovedSkeleton(
self.predicates.compilation_target.clone(),
key.clone(),
skeleton,
),
);
}
}
}
let settings = CodeGenSettings {
global_clock_tick: if predicate_info.is_dynamic {
Some(self.load_state.wam.machine_st.global_clock)
} else {
None
},
is_extensible: predicate_info.is_extensible,
non_counted_bt,
};
let code_index =
self.load_state
.compile(key.clone(), &mut self.predicates, settings)?;
if let Some(filename) = self.load_state.listing_src_file_name() {
match self.load_state.wam.indices.modules.get_mut(&filename) {
Some(ref mut module) => {
let index_ptr = code_index.get();
let code_index = module.code_dir.entry(key.clone()).or_insert(code_index);
set_code_index(
&mut self.load_state.retraction_info,
&CompilationTarget::Module(filename),
key.clone(),
&code_index,
index_ptr,
);
}
None => {}
}
}
}
if predicate_info.is_dynamic {
self.load_state.wam.machine_st.global_clock += 1;
let clauses_vec: Vec<_> = self.clause_clauses.drain(0..predicates_len).collect();
self.compile_clause_clauses(
key,
self.predicates.compilation_target.clone(),
clauses_vec.into_iter(),
AppendOrPrepend::Append,
)?;
}
Ok(())
}
}
| 35.361207 | 100 | 0.496745 |
901360ee2ab73e36d6b6840fc12db1a49bae776a | 2,082 |
use std::{error::Error, fmt, io};
#[derive(Debug)]
pub enum CompressError {
IOErr(io::Error),
Overflow,
Malformed,
}
#[derive(Debug)]
pub enum DecompressError {
IOErr(io::Error),
Unsupported,
Underflow,
Malformed,
TypeMismatch,
}
impl From<io::Error> for CompressError {
fn from(err: io::Error) -> Self {
CompressError::IOErr(err)
}
}
impl fmt::Display for CompressError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
CompressError::IOErr(e) => write!(f, "{}", e),
CompressError::Overflow => write!(f, "Buffer is too small"),
CompressError::Malformed => write!(f, "Invalid arguments"),
}
}
}
impl Error for CompressError {}
impl From<io::Error> for DecompressError {
fn from(err: io::Error) -> Self {
DecompressError::IOErr(err)
}
}
impl fmt::Display for DecompressError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecompressError::IOErr(e) => write!(f, "{}", e),
DecompressError::Unsupported => write!(f, "Unsupported image format"),
DecompressError::Malformed => write!(f, "Invalid arguments"),
DecompressError::Underflow => write!(f, "Buffer underflow detected"),
DecompressError::TypeMismatch => write!(f, "Image data doesn't match the header"),
}
}
}
impl Error for DecompressError {}
#[derive(Debug)]
pub enum HeaderErr {
IOErr(io::Error),
WrongMagic,
WrongValue(String),
}
impl From<io::Error> for HeaderErr {
fn from(err: io::Error) -> HeaderErr {
HeaderErr::IOErr(err)
}
}
impl fmt::Display for HeaderErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HeaderErr::IOErr(e) => write!(f, "{}", e),
HeaderErr::WrongMagic => write!(f, "Header doesn't contain GFWX magic"),
            HeaderErr::WrongValue(e) => write!(f, "Invalid field value in header: {}", e),
}
}
}
impl Error for HeaderErr {}
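// Illustrative only: the `From<io::Error>` impls above let the `?` operator
// lift I/O failures into these error types. A hypothetical header reader
// (`read_header` and the `b"GFWX"` check are assumptions for this sketch,
// not part of this module) might look like:
//
//     fn read_header(r: &mut impl io::Read) -> Result<(), HeaderErr> {
//         let mut magic = [0u8; 4];
//         r.read_exact(&mut magic)?; // io::Error -> HeaderErr via From
//         if &magic != b"GFWX" {
//             return Err(HeaderErr::WrongMagic);
//         }
//         Ok(())
//     }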
| 25.703704 | 94 | 0.588377 |
f82deeca46be4f75c45a0eb106650a6f4f2cfa14 | 229 |
use crate::devices::update_signal::{ReadUpdateSignal, UpdatePlan};
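/// A no-op update signal: `read_update_plan` always answers `UpdatePlan::Any`,
/// placing no constraint on when updates may occur.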
#[derive(Default)]
pub struct NullUpdateSignal;
impl ReadUpdateSignal for NullUpdateSignal {
fn read_update_plan(&self) -> UpdatePlan { UpdatePlan::Any }
}
| 25.444444 | 66 | 0.768559 |
144bb612e26ed50f0868037585130b5c7d8327e3 | 4,440 |
use super::*;
use std::process::Stdio;
use tokio::{
io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
process::{Child, ChildStderr, ChildStdout, Command},
};
#[derive(Debug)]
pub struct LocalShell {
pub sudo: bool,
pub chroot: Option<String>,
}
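// Illustrative usage only (a sketch, not a test). It assumes a tokio runtime,
// that `Regex` comes from `super::*`, and that `ControlStream` forwards
// `wait_for` to its inner stream:
//
//     let shell = LocalShell { sudo: false, chroot: None };
//     let (output, code) = shell.exec("echo hello").await?;
//     assert_eq!(code, 0);
//
//     // Streaming variant: launch a command, then block until a line of its
//     // output matches a pattern.
//     let mut stream = shell.exec_open("dmesg -w").await?;
//     let (_seen, _exit) = stream.wait_for(&Regex::new("error").unwrap()).await?;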
#[async_trait]
impl ControlModuleTrait for LocalShell {
async fn is_connected(&self) -> Result<bool> {
Ok(true)
}
async fn connect(&self) -> Result<()> {
Ok(())
}
async fn exec(&self, cmd: &str) -> Result<(String, u32)> {
let cmd = self.build_command(self.sudo, self.chroot.as_ref().map(|s| s.as_str()), cmd);
let result = Command::new("sh")
.args(&["-c", &cmd])
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.kill_on_drop(true)
.output()
.await?;
Ok((
format!(
"{}\n{}",
std::str::from_utf8(&result.stdout)?.trim_end(),
std::str::from_utf8(&result.stderr)?.trim_end(),
),
result.status.code().unwrap_or(256) as u32,
))
}
async fn exec_open(&self, cmd: &str) -> Result<ControlStream> {
let cmd = self.build_command(self.sudo, self.chroot.as_ref().map(|s| s.as_str()), cmd);
let mut child = Command::new("sh")
.args(&["-c", &cmd])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.kill_on_drop(true)
.spawn()?;
let stdout = BufReader::new(child.stdout.take().unwrap());
let stderr = BufReader::new(child.stderr.take().unwrap());
Ok(ControlStream(Box::new(LocalShellStream {
child: Some(child),
stdout,
stderr,
})))
}
async fn disconnect(&self) -> Result<()> {
Ok(())
}
}
#[derive(Debug)]
pub struct LocalShellStream {
child: Option<Child>,
stdout: BufReader<ChildStdout>,
stderr: BufReader<ChildStderr>,
}
#[async_trait]
impl ControlStreamTrait for LocalShellStream {
async fn wait_for_completion(&mut self) -> Result<(String, u32)> {
if let Some(child) = self.child.take() {
let result = child.wait_with_output().await?;
Ok((
format!(
"{}\n{}",
std::str::from_utf8(&result.stdout)?.trim_end(),
std::str::from_utf8(&result.stderr)?.trim_end(),
),
result.status.code().unwrap_or(256) as u32,
))
} else {
            Err(AppError::Generic(
                "This stream has been completed!".to_string(),
            ))
}
}
async fn wait_for(&mut self, ptrn: &Regex) -> Result<(String, Option<u32>)> {
if let Some(child) = &mut self.child {
let mut output = String::new();
let mut found = false;
let mut code = None;
while !found && code.is_none() {
let mut stdout_line = Default::default();
let mut stderr_line = Default::default();
tokio::select! {
val = self.stdout.read_line(&mut stdout_line) => {
if val? == 0 {
continue;
}
}
val = self.stderr.read_line(&mut stderr_line) => {
if val? == 0 {
continue;
}
}
val = child.wait() => {
code = val?.code().map(|v| v as u32);
}
}
debug!("{}{}", stdout_line, stderr_line);
if ptrn.is_match(&stdout_line) || ptrn.is_match(&stderr_line) {
found = true;
}
output.push_str(&stdout_line);
output.push_str(&stderr_line);
}
Ok((output, code))
} else {
Err(AppError::Generic("Child process unavailable!".into()))
}
}
async fn sendline(&mut self, data: &str) -> Result<()> {
if let Some(child) = &mut self.child {
if let Some(stdin) = &mut child.stdin {
                // Use write_all so a short write doesn't silently drop data.
                stdin.write_all(data.as_bytes()).await?;
return Ok(());
}
}
Ok(())
}
}
| 31.048951 | 95 | 0.470045 |
ab4fb9607ca002b4024d49d3ccf84558797c37ff | 114,149 |
//! Candidate selection. See the [rustc dev guide] for more information on how this works.
//!
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
use self::EvaluationResult::*;
use self::SelectionCandidate::*;
use super::coherence::{self, Conflict};
use super::const_evaluatable;
use super::project;
use super::project::normalize_with_depth_to;
use super::project::ProjectionTyObligation;
use super::util;
use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
use super::wf;
use super::DerivedObligationCause;
use super::Normalized;
use super::Obligation;
use super::ObligationCauseCode;
use super::Selection;
use super::SelectionResult;
use super::TraitQueryMode;
use super::{ErrorReporting, Overflow, SelectionError};
use super::{ObligationCause, PredicateObligation, TraitObligation};
use crate::infer::{InferCtxt, InferOk, TypeFreshener};
use crate::traits::error_reporting::InferCtxtExt;
use crate::traits::project::ProjectionCacheKeyExt;
use crate::traits::ProjectionCacheKey;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::thir::abstract_const::NotConstEvaluatable;
use rustc_middle::ty::fast_reject::{self, SimplifyParams, StripReferences};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::relate::TypeRelation;
use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef};
use rustc_middle::ty::{self, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable};
use rustc_span::symbol::sym;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::fmt::{self, Display};
use std::iter;
pub use rustc_middle::traits::select::*;
mod candidate_assembly;
mod confirmation;
#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
DownstreamCrate { trait_desc: String, self_desc: Option<String> },
UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
ReservationImpl { message: String },
}
impl IntercrateAmbiguityCause {
/// Emits notes when the overlap is caused by complex intercrate ambiguities.
/// See #23980 for details.
pub fn add_intercrate_ambiguity_hint(&self, err: &mut rustc_errors::DiagnosticBuilder<'_>) {
err.note(&self.intercrate_ambiguity_hint());
}
pub fn intercrate_ambiguity_hint(&self) -> String {
match self {
IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } => {
let self_desc = if let Some(ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
}
IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } => {
let self_desc = if let Some(ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!(
"upstream crates may add a new impl of trait `{}`{} \
in future versions",
trait_desc, self_desc
)
}
IntercrateAmbiguityCause::ReservationImpl { message } => message.clone(),
}
}
}
pub struct SelectionContext<'cx, 'tcx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
/// Freshener used specifically for entries on the obligation
/// stack. This ensures that all entries on the stack at one time
/// will have the same set of placeholder entries, which is
/// important for checking for trait bounds that recursively
/// require themselves.
freshener: TypeFreshener<'cx, 'tcx>,
/// If `true`, indicates that the evaluation should be conservative
/// and consider the possibility of types outside this crate.
/// This comes up primarily when resolving ambiguity. Imagine
/// there is some trait reference `$0: Bar` where `$0` is an
/// inference variable. If `intercrate` is true, then we can never
/// say for sure that this reference is not implemented, even if
/// there are *no impls at all for `Bar`*, because `$0` could be
/// bound to some type that in a downstream crate that implements
/// `Bar`. This is the suitable mode for coherence. Elsewhere,
/// though, we set this to false, because we are only interested
/// in types that the user could actually have written --- in
/// other words, we consider `$0: Bar` to be unimplemented if
/// there is no type that the user could *actually name* that
/// would satisfy it. This avoids crippling inference, basically.
intercrate: bool,
intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
/// Controls whether or not to filter out negative impls when selecting.
/// This is used in librustdoc to distinguish between the lack of an impl
/// and a negative impl
allow_negative_impls: bool,
/// The mode that trait queries run in, which informs our error handling
/// policy. In essence, canonicalized queries need their errors propagated
/// rather than immediately reported because we do not have accurate spans.
query_mode: TraitQueryMode,
}
// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx> {
obligation: &'prev TraitObligation<'tcx>,
/// The trait predicate from `obligation` but "freshened" with the
/// selection-context's freshener. Used to check for recursion.
fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
/// Starts out equal to `depth` -- if, during evaluation, we
/// encounter a cycle, then we will set this flag to the minimum
/// depth of that cycle for all participants in the cycle. These
/// participants will then forego caching their results. This is
/// not the most efficient solution, but it addresses #60010. The
/// problem we are trying to prevent:
///
/// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
/// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
/// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
///
/// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
/// is `EvaluatedToOk`; this is because they were only considered
    /// ok on the premise that `A: AutoTrait` held, but we indeed
    /// encountered a problem (later on) with `A: AutoTrait`. So we
/// currently set a flag on the stack node for `B: AutoTrait` (as
/// well as the second instance of `A: AutoTrait`) to suppress
/// caching.
///
/// This is a simple, targeted fix. A more-performant fix requires
/// deeper changes, but would permit more caching: we could
/// basically defer caching until we have fully evaluated the
/// tree, and then cache the entire tree at once. In any case, the
/// performance impact here shouldn't be so horrible: every time
/// this is hit, we do cache at least one trait, so we only
/// evaluate each member of a cycle up to N times, where N is the
/// length of the cycle. This means the performance impact is
/// bounded and we shouldn't have any terrible worst-cases.
reached_depth: Cell<usize>,
previous: TraitObligationStackList<'prev, 'tcx>,
/// The number of parent frames plus one (thus, the topmost frame has depth 1).
depth: usize,
/// The depth-first number of this node in the search graph -- a
/// pre-order index. Basically, a freshly incremented counter.
dfn: usize,
}
struct SelectionCandidateSet<'tcx> {
// A list of candidates that definitely apply to the current
// obligation (meaning: types unify).
vec: Vec<SelectionCandidate<'tcx>>,
// If `true`, then there were candidates that might or might
// not have applied, but we couldn't tell. This occurs when some
// of the input types are type variables, in which case there are
// various "builtin" rules that might or might not trigger.
ambiguous: bool,
}
#[derive(PartialEq, Eq, Debug, Clone)]
struct EvaluatedCandidate<'tcx> {
candidate: SelectionCandidate<'tcx>,
evaluation: EvaluationResult,
}
/// When does the builtin impl for `T: Trait` apply?
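///
/// For example, the builtin `Copy` impl for a tuple `(A, B)` is
/// `Where([A, B])`: it applies iff both `A: Copy` and `B: Copy` hold.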
#[derive(Debug)]
enum BuiltinImplConditions<'tcx> {
/// The impl is conditional on `T1, T2, ...: Trait`.
Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
/// There is no built-in impl. There may be some other
/// candidate (a where-clause or user-defined impl).
None,
/// It is unknown whether there is an impl.
Ambiguous,
}
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx,
freshener: infcx.freshener_keep_static(),
intercrate: false,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx,
freshener: infcx.freshener_keep_static(),
intercrate: true,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_negative(
infcx: &'cx InferCtxt<'cx, 'tcx>,
allow_negative_impls: bool,
) -> SelectionContext<'cx, 'tcx> {
debug!(?allow_negative_impls, "with_negative");
SelectionContext {
infcx,
freshener: infcx.freshener_keep_static(),
intercrate: false,
intercrate_ambiguity_causes: None,
allow_negative_impls,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_query_mode(
infcx: &'cx InferCtxt<'cx, 'tcx>,
query_mode: TraitQueryMode,
) -> SelectionContext<'cx, 'tcx> {
debug!(?query_mode, "with_query_mode");
SelectionContext {
infcx,
freshener: infcx.freshener_keep_static(),
intercrate: false,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode,
}
}
/// Enables tracking of intercrate ambiguity causes. These are
/// used in coherence to give improved diagnostics. We don't do
/// this until we detect a coherence error because it can lead to
/// false overflow results (#47139) and because it costs
/// computation time.
pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
assert!(self.intercrate);
assert!(self.intercrate_ambiguity_causes.is_none());
self.intercrate_ambiguity_causes = Some(vec![]);
debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
}
/// Gets the intercrate ambiguity causes collected since tracking
/// was enabled and disables tracking at the same time. If
/// tracking is not enabled, just returns an empty vector.
pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
assert!(self.intercrate);
self.intercrate_ambiguity_causes.take().unwrap_or_default()
}
pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
self.infcx
}
pub fn tcx(&self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
pub fn is_intercrate(&self) -> bool {
self.intercrate
}
///////////////////////////////////////////////////////////////////////////
// Selection
//
// The selection phase tries to identify *how* an obligation will
// be resolved. For example, it will identify which impl or
// parameter bound is to be used. The process can be inconclusive
// if the self type in the obligation is not fully inferred. Selection
// can result in an error in one of two ways:
//
// 1. If no applicable impl or parameter bound can be found.
// 2. If the output type parameters in the obligation do not match
// those specified by the impl/bound. For example, if the obligation
// is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
/// Attempts to satisfy the obligation. If successful, this will affect the surrounding
/// type environment by performing unification.
#[instrument(level = "debug", skip(self))]
pub fn select(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
let candidate = match self.select_from_obligation(obligation) {
Err(SelectionError::Overflow) => {
// In standard mode, overflow must have been caught and reported
// earlier.
assert!(self.query_mode == TraitQueryMode::Canonical);
return Err(SelectionError::Overflow);
}
Err(SelectionError::Ambiguous(_)) => {
return Ok(None);
}
Err(e) => {
return Err(e);
}
Ok(None) => {
return Ok(None);
}
Ok(Some(candidate)) => candidate,
};
match self.confirm_candidate(obligation, candidate) {
Err(SelectionError::Overflow) => {
assert!(self.query_mode == TraitQueryMode::Canonical);
Err(SelectionError::Overflow)
}
Err(e) => Err(e),
Ok(candidate) => {
debug!(?candidate, "confirmed");
Ok(Some(candidate))
}
}
}
crate fn select_from_obligation(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
debug_assert!(!obligation.predicate.has_escaping_bound_vars());
let pec = &ProvisionalEvaluationCache::default();
let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
self.candidate_from_obligation(&stack)
}
///////////////////////////////////////////////////////////////////////////
// EVALUATION
//
// Tests whether an obligation can be selected or whether an impl
// can be applied to particular types. It skips the "confirmation"
// step and hence completely ignores output type parameters.
//
// The result is "true" if the obligation *may* hold and "false" if
// we can be sure it does not.
/// Evaluates whether the obligation `obligation` can be satisfied (by any means).
pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
debug!(?obligation, "predicate_may_hold_fatal");
// This fatal query is a stopgap that should only be used in standard mode,
// where we do not expect overflow to be propagated.
assert!(self.query_mode == TraitQueryMode::Standard);
self.evaluate_root_obligation(obligation)
.expect("Overflow should be caught earlier in standard query mode")
.may_apply()
}
/// Evaluates whether the obligation `obligation` can be satisfied
/// and returns an `EvaluationResult`. This is meant for the
/// *initial* call.
pub fn evaluate_root_obligation(
&mut self,
obligation: &PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.evaluation_probe(|this| {
this.evaluate_predicate_recursively(
TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
obligation.clone(),
)
})
}
fn evaluation_probe(
&mut self,
op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
) -> Result<EvaluationResult, OverflowError> {
self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
let result = op(self)?;
match self.infcx.leak_check(true, snapshot) {
Ok(()) => {}
Err(_) => return Ok(EvaluatedToErr),
}
match self.infcx.region_constraints_added_in_snapshot(snapshot) {
None => Ok(result),
Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
}
})
}
/// Evaluates the predicates in `predicates` recursively. Note that
/// this applies projections in the predicates, and therefore
/// is run within an inference probe.
#[instrument(skip(self, stack), level = "debug")]
fn evaluate_predicates_recursively<'o, I>(
&mut self,
stack: TraitObligationStackList<'o, 'tcx>,
predicates: I,
) -> Result<EvaluationResult, OverflowError>
where
I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
{
let mut result = EvaluatedToOk;
for obligation in predicates {
let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
if let EvaluatedToErr = eval {
// fast-path - EvaluatedToErr is the top of the lattice,
// so we don't need to look on the other predicates.
return Ok(EvaluatedToErr);
} else {
result = cmp::max(result, eval);
}
}
Ok(result)
}
#[instrument(
level = "debug",
skip(self, previous_stack),
fields(previous_stack = ?previous_stack.head())
)]
fn evaluate_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
obligation: PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
// `previous_stack` stores a `TraitObligation`, while `obligation` is
// a `PredicateObligation`. These are distinct types, so we can't
// use any `Option` combinator method that would force them to be
// the same.
match previous_stack.head() {
Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
None => self.check_recursion_limit(&obligation, &obligation)?,
}
let result = ensure_sufficient_stack(|| {
let bound_predicate = obligation.predicate.kind();
match bound_predicate.skip_binder() {
ty::PredicateKind::Trait(t) => {
let t = bound_predicate.rebind(t);
debug_assert!(!t.has_escaping_bound_vars());
let obligation = obligation.with(t);
self.evaluate_trait_predicate_recursively(previous_stack, obligation)
}
ty::PredicateKind::Subtype(p) => {
let p = bound_predicate.rebind(p);
// Does this code ever run?
match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
Some(Ok(InferOk { mut obligations, .. })) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(
previous_stack,
obligations.into_iter(),
)
}
Some(Err(_)) => Ok(EvaluatedToErr),
None => Ok(EvaluatedToAmbig),
}
}
ty::PredicateKind::Coerce(p) => {
let p = bound_predicate.rebind(p);
// Does this code ever run?
match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
Some(Ok(InferOk { mut obligations, .. })) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(
previous_stack,
obligations.into_iter(),
)
}
Some(Err(_)) => Ok(EvaluatedToErr),
None => Ok(EvaluatedToAmbig),
}
}
ty::PredicateKind::WellFormed(arg) => match wf::obligations(
self.infcx,
obligation.param_env,
obligation.cause.body_id,
obligation.recursion_depth + 1,
arg,
obligation.cause.span,
) {
Some(mut obligations) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(previous_stack, obligations)
}
None => Ok(EvaluatedToAmbig),
},
ty::PredicateKind::TypeOutlives(pred) => {
// A global type with no late-bound regions can only
// contain the "'static" lifetime (any other lifetime
// would either be late-bound or local), so it is guaranteed
// to outlive any other lifetime
if pred.0.is_global() && !pred.0.has_late_bound_regions() {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToOkModuloRegions)
}
}
ty::PredicateKind::RegionOutlives(..) => {
// We do not consider region relationships when evaluating trait matches.
Ok(EvaluatedToOkModuloRegions)
}
ty::PredicateKind::ObjectSafe(trait_def_id) => {
if self.tcx().is_object_safe(trait_def_id) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
ty::PredicateKind::Projection(data) => {
let data = bound_predicate.rebind(data);
let project_obligation = obligation.with(data);
match project::poly_project_and_unify_type(self, &project_obligation) {
Ok(Ok(Some(mut subobligations))) => {
'compute_res: {
                                // If we've previously marked this projection as 'complete', then
// use the final cached result (either `EvaluatedToOk` or
// `EvaluatedToOkModuloRegions`), and skip re-evaluating the
// sub-obligations.
if let Some(key) =
ProjectionCacheKey::from_poly_projection_predicate(self, data)
{
if let Some(cached_res) = self
.infcx
.inner
.borrow_mut()
.projection_cache()
.is_complete(key)
{
break 'compute_res Ok(cached_res);
}
}
self.add_depth(
subobligations.iter_mut(),
obligation.recursion_depth,
);
let res = self.evaluate_predicates_recursively(
previous_stack,
subobligations,
);
if let Ok(res) = res {
if res == EvaluatedToOk || res == EvaluatedToOkModuloRegions {
if let Some(key) =
ProjectionCacheKey::from_poly_projection_predicate(
self, data,
)
{
// If the result is something that we can cache, then mark this
// entry as 'complete'. This will allow us to skip evaluating the
                                            // subobligations at all the next time we evaluate the projection
// predicate.
self.infcx
.inner
.borrow_mut()
.projection_cache()
.complete(key, res);
}
}
}
res
}
}
Ok(Ok(None)) => Ok(EvaluatedToAmbig),
Ok(Err(project::InProgress)) => Ok(EvaluatedToRecur),
Err(_) => Ok(EvaluatedToErr),
}
}
ty::PredicateKind::ClosureKind(_, closure_substs, kind) => {
match self.infcx.closure_kind(closure_substs) {
Some(closure_kind) => {
if closure_kind.extends(kind) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
None => Ok(EvaluatedToAmbig),
}
}
ty::PredicateKind::ConstEvaluatable(uv) => {
match const_evaluatable::is_const_evaluatable(
self.infcx,
uv,
obligation.param_env,
obligation.cause.span,
) {
Ok(()) => Ok(EvaluatedToOk),
Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig),
Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr),
Err(_) => Ok(EvaluatedToErr),
}
}
ty::PredicateKind::ConstEquate(c1, c2) => {
debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts");
if self.tcx().features().generic_const_exprs {
// FIXME: we probably should only try to unify abstract constants
// if the constants depend on generic parameters.
//
// Let's just see where this breaks :shrug:
if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
(c1.val, c2.val)
{
if self.infcx.try_unify_abstract_consts(a.shrink(), b.shrink()) {
return Ok(EvaluatedToOk);
}
}
}
let evaluate = |c: &'tcx ty::Const<'tcx>| {
if let ty::ConstKind::Unevaluated(unevaluated) = c.val {
self.infcx
.const_eval_resolve(
obligation.param_env,
unevaluated,
Some(obligation.cause.span),
)
.map(|val| ty::Const::from_value(self.tcx(), val, c.ty))
} else {
Ok(c)
}
};
match (evaluate(c1), evaluate(c2)) {
(Ok(c1), Ok(c2)) => {
match self
.infcx()
.at(&obligation.cause, obligation.param_env)
.eq(c1, c2)
{
Ok(_) => Ok(EvaluatedToOk),
Err(_) => Ok(EvaluatedToErr),
}
}
(Err(ErrorHandled::Reported(ErrorReported)), _)
| (_, Err(ErrorHandled::Reported(ErrorReported))) => Ok(EvaluatedToErr),
(Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
span_bug!(
obligation.cause.span(self.tcx()),
"ConstEquate: const_eval_resolve returned an unexpected error"
)
}
(Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() {
Ok(EvaluatedToAmbig)
} else {
// Two different constants using generic parameters ~> error.
Ok(EvaluatedToErr)
}
}
}
}
ty::PredicateKind::TypeWellFormedFromEnv(..) => {
bug!("TypeWellFormedFromEnv is only used for chalk")
}
}
});
debug!("finished: {:?} from {:?}", result, obligation);
result
}
#[instrument(skip(self, previous_stack), level = "debug")]
fn evaluate_trait_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
mut obligation: TraitObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
if !self.intercrate
&& obligation.is_global()
&& obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst())
{
// If a param env has no global bounds, global obligations do not
// depend on its particular value in order to work, so we can clear
// out the param env and get better caching.
debug!("in global");
obligation.param_env = obligation.param_env.without_caller_bounds();
}
let stack = self.push_stack(previous_stack, &obligation);
let mut fresh_trait_pred = stack.fresh_trait_pred;
let mut param_env = obligation.param_env;
fresh_trait_pred = fresh_trait_pred.map_bound(|mut pred| {
pred.remap_constness(self.tcx(), &mut param_env);
pred
});
debug!(?fresh_trait_pred);
if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
debug!(?result, "CACHE HIT");
return Ok(result);
}
if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
debug!(?result, "PROVISIONAL CACHE HIT");
stack.update_reached_depth(result.reached_depth);
return Ok(result.result);
}
// Check if this is a match for something already on the
// stack. If so, we don't want to insert the result into the
// main cache (it is cycle dependent) nor the provisional
// cache (which is meant for things that have completed but
// for a "backedge" -- this result *is* the backedge).
if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
return Ok(cycle_result);
}
let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
let result = result?;
if !result.must_apply_modulo_regions() {
stack.cache().on_failure(stack.dfn);
}
let reached_depth = stack.reached_depth.get();
if reached_depth >= stack.depth {
debug!(?result, "CACHE MISS");
self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
stack.cache().on_completion(
stack.dfn,
|fresh_trait_pred, provisional_result, provisional_dep_node| {
// Create a new `DepNode` that has dependencies on:
// * The `DepNode` for the original evaluation that resulted in a provisional cache
                    // entry being created
                    // * The `DepNode` for the *current* evaluation, which resulted in us completing
                    // provisional cache entries and inserting them into the evaluation cache
                    //
                    // This ensures that when a query reads this entry from the evaluation cache,
                    // it will end up (transitively) depending on all of the incr-comp dependencies
// created during the evaluation of this trait. For example, evaluating a trait
// will usually require us to invoke `type_of(field_def_id)` to determine the
// constituent types, and we want any queries reading from this evaluation
// cache entry to end up with a transitive `type_of(field_def_id`)` dependency.
//
// By using `in_task`, we're also creating an edge from the *current* query
// to the newly-created `combined_dep_node`. This is probably redundant,
// but it's better to add too many dep graph edges than to add too few
// dep graph edges.
let ((), combined_dep_node) = self.in_task(|this| {
this.tcx().dep_graph.read_index(provisional_dep_node);
this.tcx().dep_graph.read_index(dep_node);
});
self.insert_evaluation_cache(
param_env,
fresh_trait_pred,
combined_dep_node,
provisional_result.max(result),
);
},
);
} else {
debug!(?result, "PROVISIONAL");
debug!(
"caching provisionally because {:?} \
is a cycle participant (at depth {}, reached depth {})",
fresh_trait_pred, stack.depth, reached_depth,
);
stack.cache().insert_provisional(
stack.dfn,
reached_depth,
fresh_trait_pred,
result,
dep_node,
);
}
Ok(result)
}
/// If there is any previous entry on the stack that precisely
/// matches this obligation, then we can assume that the
/// obligation is satisfied for now (still all other conditions
/// must be met of course). One obvious case this comes up is
/// marker traits like `Send`. Think of a linked list:
///
/// struct List<T> { data: T, next: Option<Box<List<T>>> }
///
/// `Box<List<T>>` will be `Send` if `T` is `Send` and
/// `Option<Box<List<T>>>` is `Send`, and in turn
/// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
/// `Send`.
///
/// Note that we do this comparison using the `fresh_trait_ref`
/// fields. Because these have all been freshened using
/// `self.freshener`, we can be sure that (a) this will not
/// affect the inferencer state and (b) that if we see two
/// fresh regions with the same index, they refer to the same
/// unbound type variable.
fn check_evaluation_cycle(
&mut self,
stack: &TraitObligationStack<'_, 'tcx>,
) -> Option<EvaluationResult> {
if let Some(cycle_depth) = stack
.iter()
.skip(1) // Skip top-most frame.
.find(|prev| {
stack.obligation.param_env == prev.obligation.param_env
&& stack.fresh_trait_pred == prev.fresh_trait_pred
})
.map(|stack| stack.depth)
{
debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
// If we have a stack like `A B C D E A`, where the top of
// the stack is the final `A`, then this will iterate over
// `A, E, D, C, B` -- i.e., all the participants apart
// from the cycle head. We mark them as participating in a
            // cycle. This suppresses caching for those nodes. See the
            // `reached_depth` field on `TraitObligationStack` for more details.
stack.update_reached_depth(cycle_depth);
// Subtle: when checking for a coinductive cycle, we do
// not compare using the "freshened trait refs" (which
// have erased regions) but rather the fully explicit
// trait refs. This is important because it's only a cycle
// if the regions match exactly.
let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
let tcx = self.tcx();
let cycle = cycle.map(|stack| stack.obligation.predicate.to_predicate(tcx));
if self.coinductive_match(cycle) {
debug!("evaluate_stack --> recursive, coinductive");
Some(EvaluatedToOk)
} else {
debug!("evaluate_stack --> recursive, inductive");
Some(EvaluatedToRecur)
}
} else {
None
}
}
fn evaluate_stack<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> Result<EvaluationResult, OverflowError> {
// In intercrate mode, whenever any of the generics are unbound,
// there can always be an impl. Even if there are no impls in
// this crate, perhaps the type would be unified with
// something from another crate that does provide an impl.
//
// In intra mode, we must still be conservative. The reason is
// that we want to avoid cycles. Imagine an impl like:
//
// impl<T:Eq> Eq for Vec<T>
//
// and a trait reference like `$0 : Eq` where `$0` is an
// unbound variable. When we evaluate this trait-reference, we
// will unify `$0` with `Vec<$1>` (for some fresh variable
// `$1`), on the condition that `$1 : Eq`. We will then wind
// up with many candidates (since that are other `Eq` impls
// that apply) and try to winnow things down. This results in
        // a recursive evaluation of `$1 : Eq` -- as you can
// imagine, this is just where we started. To avoid that, we
// check for unbound variables and return an ambiguous (hence possible)
// match if we've seen this trait before.
//
// This suffices to allow chains like `FnMut` implemented in
// terms of `Fn` etc, but we could probably make this more
// precise still.
let unbound_input_types =
stack.fresh_trait_pred.skip_binder().trait_ref.substs.types().any(|ty| ty.is_fresh());
if stack.obligation.polarity() != ty::ImplPolarity::Negative {
// This check was an imperfect workaround for a bug in the old
// intercrate mode; it should be removed when that goes away.
if unbound_input_types && self.intercrate {
debug!("evaluate_stack --> unbound argument, intercrate --> ambiguous",);
                // Heuristics: show the diagnostics when there are no candidates in the crate.
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
if let Ok(candidate_set) = self.assemble_candidates(stack) {
if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
let self_ty = trait_ref.self_ty();
let cause = with_no_trimmed_paths(|| {
IntercrateAmbiguityCause::DownstreamCrate {
trait_desc: trait_ref.print_only_trait_path().to_string(),
self_desc: if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
},
}
});
debug!(?cause, "evaluate_stack: pushing cause");
self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
}
}
}
return Ok(EvaluatedToAmbig);
}
}
if unbound_input_types
&& stack.iter().skip(1).any(|prev| {
stack.obligation.param_env == prev.obligation.param_env
&& self.match_fresh_trait_refs(
stack.fresh_trait_pred,
prev.fresh_trait_pred,
prev.obligation.param_env,
)
})
{
debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
return Ok(EvaluatedToUnknown);
}
match self.candidate_from_obligation(stack) {
Ok(Some(c)) => self.evaluate_candidate(stack, &c),
Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig),
Ok(None) => Ok(EvaluatedToAmbig),
Err(Overflow) => Err(OverflowError::Canonical),
Err(ErrorReporting) => Err(OverflowError::ErrorReporting),
Err(..) => Ok(EvaluatedToErr),
}
}
/// For defaulted traits, we use a co-inductive strategy to solve, so
/// that recursion is ok. This routine returns `true` if the top of the
/// stack (`cycle[0]`):
///
/// - is a defaulted trait,
/// - it also appears in the backtrace at some position `X`,
/// - all the predicates at positions `X..` between `X` and the top are
/// also defaulted traits.
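    ///
    /// As a user-facing illustration (not compiler-internal API): auto traits
    /// such as `Send` are coinductive, so for
    ///
    ///     struct Tree { children: Vec<Tree> }
    ///
    /// proving `Tree: Send` requires `Vec<Tree>: Send`, which in turn requires
    /// `Tree: Send` again; the cycle is accepted rather than rejected.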
pub fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
where
I: Iterator<Item = ty::Predicate<'tcx>>,
{
cycle.all(|predicate| self.coinductive_predicate(predicate))
}
fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
let result = match predicate.kind().skip_binder() {
ty::PredicateKind::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()),
_ => false,
};
debug!(?predicate, ?result, "coinductive_predicate");
result
}
/// Further evaluates `candidate` to decide whether all type parameters match and whether nested
/// obligations are met. Returns whether `candidate` remains viable after this further
/// scrutiny.
#[instrument(
level = "debug",
skip(self, stack),
fields(depth = stack.obligation.recursion_depth)
)]
fn evaluate_candidate<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
candidate: &SelectionCandidate<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
let mut result = self.evaluation_probe(|this| {
let candidate = (*candidate).clone();
match this.confirm_candidate(stack.obligation, candidate) {
Ok(selection) => {
debug!(?selection);
this.evaluate_predicates_recursively(
stack.list(),
selection.nested_obligations().into_iter(),
)
}
Err(..) => Ok(EvaluatedToErr),
}
})?;
// If we erased any lifetimes, then we want to use
// `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk`
// as your final result. The result will be cached using
// the freshened trait predicate as a key, so we need
// our result to be correct by *any* choice of original lifetimes,
// not just the lifetime choice for this particular (non-erased)
// predicate.
// See issue #80691
if stack.fresh_trait_pred.has_erased_regions() {
result = result.max(EvaluatedToOkModuloRegions);
}
debug!(?result);
Ok(result)
}
fn check_evaluation_cache(
&self,
param_env: ty::ParamEnv<'tcx>,
trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> Option<EvaluationResult> {
// Neither the global nor local cache is aware of intercrate
// mode, so don't do any caching. In particular, we might
// re-use the same `InferCtxt` with both an intercrate
// and non-intercrate `SelectionContext`
if self.intercrate {
return None;
}
let tcx = self.tcx();
if self.can_use_global_caches(param_env) {
if let Some(res) = tcx.evaluation_cache.get(¶m_env.and(trait_pred), tcx) {
return Some(res);
}
}
self.infcx.evaluation_cache.get(¶m_env.and(trait_pred), tcx)
}
fn insert_evaluation_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
trait_pred: ty::PolyTraitPredicate<'tcx>,
dep_node: DepNodeIndex,
result: EvaluationResult,
) {
// Avoid caching results that depend on more than just the trait-ref
// - the stack can create recursion.
if result.is_stack_dependent() {
return;
}
// Neither the global nor local cache is aware of intercrate
// mode, so don't do any caching. In particular, we might
// re-use the same `InferCtxt` with both an intercrate
// and non-intercrate `SelectionContext`
if self.intercrate {
return;
}
if self.can_use_global_caches(param_env) {
if !trait_pred.needs_infer() {
debug!(?trait_pred, ?result, "insert_evaluation_cache global");
// This may overwrite the cache with the same value
// FIXME: Due to #50507 this overwrites the different values
// This should be changed to use HashMapExt::insert_same
// when that is fixed
self.tcx().evaluation_cache.insert(param_env.and(trait_pred), dep_node, result);
return;
}
}
debug!(?trait_pred, ?result, "insert_evaluation_cache");
self.infcx.evaluation_cache.insert(param_env.and(trait_pred), dep_node, result);
}
/// For various reasons, it's possible for a subobligation
/// to have a *lower* recursion_depth than the obligation used to create it.
/// Projection sub-obligations may be returned from the projection cache,
/// which results in obligations with an 'old' `recursion_depth`.
/// Additionally, methods like `InferCtxt.subtype_predicate` produce
/// subobligations without taking in a 'parent' depth, causing the
/// generated subobligations to have a `recursion_depth` of `0`.
///
/// To ensure that obligation_depth never decreases, we force all subobligations
/// to have at least the depth of the original obligation.
fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(
&self,
it: I,
min_depth: usize,
) {
it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
}
fn check_recursion_depth<T: Display + TypeFoldable<'tcx>>(
&self,
depth: usize,
error_obligation: &Obligation<'tcx, T>,
) -> Result<(), OverflowError> {
if !self.infcx.tcx.recursion_limit().value_within_limit(depth) {
match self.query_mode {
TraitQueryMode::Standard => {
if self.infcx.is_tainted_by_errors() {
return Err(OverflowError::ErrorReporting);
}
self.infcx.report_overflow_error(error_obligation, true);
}
TraitQueryMode::Canonical => {
return Err(OverflowError::Canonical);
}
}
}
Ok(())
}
/// Checks that the recursion limit has not been exceeded.
///
/// The weird return type of this function allows it to be used with the `try` (`?`)
/// operator within certain functions.
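    ///
    /// For example, inside a function returning `Result<_, OverflowError>`:
    ///
    ///     self.check_recursion_limit(&obligation, &error_obligation)?;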
#[inline(always)]
fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
&self,
obligation: &Obligation<'tcx, T>,
error_obligation: &Obligation<'tcx, V>,
) -> Result<(), OverflowError> {
self.check_recursion_depth(obligation.recursion_depth, error_obligation)
}
fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
where
OP: FnOnce(&mut Self) -> R,
{
let (result, dep_node) =
self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}
    /// `filter_impls` filters out candidates that cannot satisfy a const trait
    /// obligation, as well as candidates that have a positive impl for a
    /// negative goal or a negative impl for a positive goal.
#[instrument(level = "debug", skip(self))]
fn filter_impls(
&mut self,
candidates: Vec<SelectionCandidate<'tcx>>,
obligation: &TraitObligation<'tcx>,
) -> Vec<SelectionCandidate<'tcx>> {
let tcx = self.tcx();
let mut result = Vec::with_capacity(candidates.len());
for candidate in candidates {
// Respect const trait obligations
if obligation.is_const() {
match candidate {
// const impl
ImplCandidate(def_id)
if tcx.impl_constness(def_id) == hir::Constness::Const => {}
// const param
ParamCandidate(trait_pred)
if trait_pred.skip_binder().constness
== ty::BoundConstness::ConstIfConst => {}
// auto trait impl
AutoImplCandidate(..) => {}
// generator, this will raise error in other places
// or ignore error with const_async_blocks feature
GeneratorCandidate => {}
// FnDef where the function is const
FnPointerCandidate { is_const: true } => {}
ConstDropCandidate(_) => {}
_ => {
// reject all other types of candidates
continue;
}
}
}
if let ImplCandidate(def_id) = candidate {
if ty::ImplPolarity::Reservation == tcx.impl_polarity(def_id)
|| obligation.polarity() == tcx.impl_polarity(def_id)
|| self.allow_negative_impls
{
result.push(candidate);
}
} else {
result.push(candidate);
}
}
result
}
    /// `filter_reservation_impls` filters out reservation impls, treating them
    /// as ambiguous for any goal.
#[instrument(level = "debug", skip(self))]
fn filter_reservation_impls(
&mut self,
candidate: SelectionCandidate<'tcx>,
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
let tcx = self.tcx();
// Treat reservation impls as ambiguity.
if let ImplCandidate(def_id) = candidate {
if let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id) {
if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes {
let attrs = tcx.get_attrs(def_id);
let attr = tcx.sess.find_by_name(&attrs, sym::rustc_reservation_impl);
let value = attr.and_then(|a| a.value_str());
if let Some(value) = value {
debug!(
"filter_reservation_impls: \
reservation impl ambiguity on {:?}",
def_id
);
intercrate_ambiguity_clauses.push(
IntercrateAmbiguityCause::ReservationImpl {
message: value.to_string(),
},
);
}
}
return Ok(None);
}
}
Ok(Some(candidate))
}
fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
debug!("is_knowable(intercrate={:?})", self.intercrate);
if !self.intercrate || stack.obligation.polarity() == ty::ImplPolarity::Negative {
return None;
}
let obligation = &stack.obligation;
let predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
// Okay to skip binder because of the nature of the
// trait-ref-is-knowable check, which does not care about
// bound regions.
let trait_ref = predicate.skip_binder().trait_ref;
coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
}
/// Returns `true` if the global caches can be used.
fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
// If there are any inference variables in the `ParamEnv`, then we
// always use a cache local to this particular scope. Otherwise, we
// switch to a global cache.
if param_env.needs_infer() {
return false;
}
// Avoid using the master cache during coherence and just rely
// on the local cache. This effectively disables caching
// during coherence. It is really just a simplification to
// avoid us having to fear that coherence results "pollute"
// the master cache. Since coherence executes pretty quickly,
// it's not worth going to more trouble to increase the
// hit-rate, I don't think.
if self.intercrate {
return false;
}
// Otherwise, we can use the global cache.
true
}
fn check_candidate_cache(
&mut self,
mut param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
// Neither the global nor local cache is aware of intercrate
// mode, so don't do any caching. In particular, we might
// re-use the same `InferCtxt` with both an intercrate
// and non-intercrate `SelectionContext`
if self.intercrate {
return None;
}
let tcx = self.tcx();
let mut pred = cache_fresh_trait_pred.skip_binder();
pred.remap_constness(tcx, &mut param_env);
if self.can_use_global_caches(param_env) {
if let Some(res) = tcx.selection_cache.get(¶m_env.and(pred), tcx) {
return Some(res);
}
}
self.infcx.selection_cache.get(¶m_env.and(pred), tcx)
}
    /// Determines whether we can safely cache the result
/// of selecting an obligation. This is almost always `true`,
/// except when dealing with certain `ParamCandidate`s.
///
/// Ordinarily, a `ParamCandidate` will contain no inference variables,
/// since it was usually produced directly from a `DefId`. However,
    /// in certain cases (currently only librustdoc's blanket impl finder),
/// a `ParamEnv` may be explicitly constructed with inference types.
/// When this is the case, we do *not* want to cache the resulting selection
/// candidate. This is due to the fact that it might not always be possible
/// to equate the obligation's trait ref and the candidate's trait ref,
/// if more constraints end up getting added to an inference variable.
///
/// Because of this, we always want to re-run the full selection
/// process for our obligation the next time we see it, since
/// we might end up picking a different `SelectionCandidate` (or none at all).
fn can_cache_candidate(
&self,
result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
) -> bool {
// Neither the global nor local cache is aware of intercrate
// mode, so don't do any caching. In particular, we might
// re-use the same `InferCtxt` with both an intercrate
// and non-intercrate `SelectionContext`
if self.intercrate {
return false;
}
match result {
Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(),
_ => true,
}
}
fn insert_candidate_cache(
&mut self,
mut param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
dep_node: DepNodeIndex,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
) {
let tcx = self.tcx();
let mut pred = cache_fresh_trait_pred.skip_binder();
pred.remap_constness(tcx, &mut param_env);
if !self.can_cache_candidate(&candidate) {
debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
return;
}
if self.can_use_global_caches(param_env) {
if let Err(Overflow) = candidate {
// Don't cache overflow globally; we only produce this in certain modes.
} else if !pred.needs_infer() {
if !candidate.needs_infer() {
debug!(?pred, ?candidate, "insert_candidate_cache global");
// This may overwrite the cache with the same value.
tcx.selection_cache.insert(param_env.and(pred), dep_node, candidate);
return;
}
}
}
debug!(?pred, ?candidate, "insert_candidate_cache local");
self.infcx.selection_cache.insert(param_env.and(pred), dep_node, candidate);
}
/// Matches a predicate against the bounds of its self type.
///
/// Given an obligation like `<T as Foo>::Bar: Baz` where the self type is
/// a projection, look at the bounds of `T::Bar`, see if we can find a
/// `Baz` bound. We return indexes into the list returned by
/// `tcx.item_bounds` for any applicable bounds.
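    ///
    /// As a small illustration, given
    ///
    ///     trait Baz {}
    ///     trait Foo { type Bar: Baz; }
    ///
    /// the obligation `<T as Foo>::Bar: Baz` can be discharged by the declared
    /// `Baz` bound on the associated type, and its index is returned.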
fn match_projection_obligation_against_definition_bounds(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> smallvec::SmallVec<[usize; 2]> {
let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
let placeholder_trait_predicate =
self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate);
debug!(
?placeholder_trait_predicate,
"match_projection_obligation_against_definition_bounds"
);
let tcx = self.infcx.tcx;
let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
ty::Projection(ref data) => (data.item_def_id, data.substs),
ty::Opaque(def_id, substs) => (def_id, substs),
_ => {
span_bug!(
obligation.cause.span,
"match_projection_obligation_against_definition_bounds() called \
but self-ty is not a projection: {:?}",
placeholder_trait_predicate.trait_ref.self_ty()
);
}
};
let bounds = tcx.item_bounds(def_id).subst(tcx, substs);
// The bounds returned by `item_bounds` may contain duplicates after
// normalization, so try to deduplicate when possible to avoid
// unnecessary ambiguity.
let mut distinct_normalized_bounds = FxHashSet::default();
let matching_bounds = bounds
.iter()
.enumerate()
.filter_map(|(idx, bound)| {
let bound_predicate = bound.kind();
if let ty::PredicateKind::Trait(pred) = bound_predicate.skip_binder() {
let bound = bound_predicate.rebind(pred.trait_ref);
if self.infcx.probe(|_| {
match self.match_normalize_trait_ref(
obligation,
bound,
placeholder_trait_predicate.trait_ref,
) {
Ok(None) => true,
Ok(Some(normalized_trait))
if distinct_normalized_bounds.insert(normalized_trait) =>
{
true
}
_ => false,
}
}) {
return Some(idx);
}
}
None
})
.collect();
debug!(?matching_bounds, "match_projection_obligation_against_definition_bounds");
matching_bounds
}
/// Equates the trait in `obligation` with trait bound. If the two traits
/// can be equated and the normalized trait bound doesn't contain inference
/// variables or placeholders, the normalized bound is returned.
fn match_normalize_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_bound: ty::PolyTraitRef<'tcx>,
placeholder_trait_ref: ty::TraitRef<'tcx>,
) -> Result<Option<ty::PolyTraitRef<'tcx>>, ()> {
debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
if placeholder_trait_ref.def_id != trait_bound.def_id() {
// Avoid unnecessary normalization
return Err(());
}
let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
trait_bound,
)
});
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
.map(|InferOk { obligations: _, value: () }| {
// This method is called within a probe, so we can't have
// inference variables and placeholders escape.
if !trait_bound.needs_infer() && !trait_bound.has_placeholders() {
Some(trait_bound)
} else {
None
}
})
.map_err(|_| ())
}
fn evaluate_where_clause<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.evaluation_probe(|this| {
match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
Err(()) => Ok(EvaluatedToErr),
}
})
}
pub(super) fn match_projection_projections(
&mut self,
obligation: &ProjectionTyObligation<'tcx>,
env_predicate: PolyProjectionPredicate<'tcx>,
potentially_unnormalized_candidates: bool,
) -> bool {
let mut nested_obligations = Vec::new();
let (infer_predicate, _) = self.infcx.replace_bound_vars_with_fresh_vars(
obligation.cause.span,
LateBoundRegionConversionTime::HigherRankedType,
env_predicate,
);
let infer_projection = if potentially_unnormalized_candidates {
ensure_sufficient_stack(|| {
project::normalize_with_depth_to(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
infer_predicate.projection_ty,
&mut nested_obligations,
)
})
} else {
infer_predicate.projection_ty
};
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(obligation.predicate, infer_projection)
.map_or(false, |InferOk { obligations, value: () }| {
self.evaluate_predicates_recursively(
TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
nested_obligations.into_iter().chain(obligations),
)
.map_or(false, |res| res.may_apply())
})
}
///////////////////////////////////////////////////////////////////////////
// WINNOW
//
// Winnowing is the process of attempting to resolve ambiguity by
// probing further. During the winnowing process, we unify all
// type variables and then we also attempt to evaluate recursive
// bounds to see if they are satisfied.
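    //
    // A hedged illustration (hypothetical code, not taken from this
    // module) of the preference order implemented below:
    //
    // ```rust
    // trait Tr {}
    // impl<U> Tr for U {}    // matched as an `ImplCandidate`
    // fn f<T: Tr>(_t: T) {}  // `T: Tr` also matched as a `ParamCandidate`
    // ```
    //
    // Inside `f`, the obligation `T: Tr` is satisfied by both candidates;
    // since the where-clause bound mentions the parameter `T` it is not
    // global, so winnowing drops the blanket impl in favor of the
    // where-clause (see the `ParamCandidate` arms below).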
/// Returns `true` if `victim` should be dropped in favor of
/// `other`. Generally speaking we will drop duplicate
/// candidates and prefer where-clause candidates.
///
/// See the comment for "SelectionCandidate" for more details.
fn candidate_should_be_dropped_in_favor_of(
&mut self,
sized_predicate: bool,
victim: &EvaluatedCandidate<'tcx>,
other: &EvaluatedCandidate<'tcx>,
needs_infer: bool,
) -> bool {
if victim.candidate == other.candidate {
return true;
}
// Check if a bound would previously have been removed when normalizing
// the param_env so that it can be given the lowest priority. See
// #50825 for the motivation for this.
let is_global = |cand: &ty::PolyTraitPredicate<'tcx>| {
cand.is_global() && !cand.has_late_bound_regions()
};
// (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
// `DiscriminantKindCandidate`, and `ConstDropCandidate` to anything else.
//
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
match (&other.candidate, &victim.candidate) {
(_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
);
}
// (*)
(
BuiltinCandidate { has_nested: false }
| DiscriminantKindCandidate
| PointeeCandidate
| ConstDropCandidate(_),
_,
) => true,
(
_,
BuiltinCandidate { has_nested: false }
| DiscriminantKindCandidate
| PointeeCandidate
| ConstDropCandidate(_),
) => false,
(ParamCandidate(other), ParamCandidate(victim)) => {
let same_except_bound_vars = other.skip_binder().trait_ref
== victim.skip_binder().trait_ref
&& other.skip_binder().constness == victim.skip_binder().constness
&& other.skip_binder().polarity == victim.skip_binder().polarity
&& !other.skip_binder().trait_ref.has_escaping_bound_vars();
if same_except_bound_vars {
// See issue #84398. In short, we can generate multiple ParamCandidates which are
// the same except for unused bound vars. Just pick the one with the fewest bound vars
// or the current one if tied (they should both evaluate to the same answer). This is
// probably best characterized as a "hack", since we might prefer to just do our
// best to *not* create essentially duplicate candidates in the first place.
other.bound_vars().len() <= victim.bound_vars().len()
} else if other.skip_binder().trait_ref == victim.skip_binder().trait_ref
&& victim.skip_binder().constness == ty::BoundConstness::NotConst
&& other.skip_binder().polarity == victim.skip_binder().polarity
{
// Drop otherwise equivalent non-const candidates in favor of const candidates.
true
} else {
false
}
}
            // Drop otherwise equivalent non-const fn pointer candidates in favor of const ones
(FnPointerCandidate { .. }, FnPointerCandidate { is_const: false }) => true,
// If obligation is a sized predicate or the where-clause bound is
// global, prefer the projection or object candidate. See issue
// #50825 and #89352.
(ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => {
sized_predicate || is_global(cand)
}
(ParamCandidate(ref cand), ObjectCandidate(_) | ProjectionCandidate(_)) => {
!(sized_predicate || is_global(cand))
}
// Global bounds from the where clause should be ignored
// here (see issue #50825). Otherwise, we have a where
// clause so don't go around looking for impls.
// Arbitrarily give param candidates priority
// over projection and object candidates.
(
ParamCandidate(ref cand),
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
| TraitAliasCandidate(..),
) => !is_global(cand),
(
ImplCandidate(_)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
| TraitAliasCandidate(..),
ParamCandidate(ref cand),
) => {
// Prefer these to a global where-clause bound
// (see issue #50825).
is_global(cand) && other.evaluation.must_apply_modulo_regions()
}
(ProjectionCandidate(i), ProjectionCandidate(j))
| (ObjectCandidate(i), ObjectCandidate(j)) => {
// Arbitrarily pick the lower numbered candidate for backwards
// compatibility reasons. Don't let this affect inference.
i < j && !needs_infer
}
(ObjectCandidate(_), ProjectionCandidate(_))
| (ProjectionCandidate(_), ObjectCandidate(_)) => {
bug!("Have both object and projection candidate")
}
// Arbitrarily give projection and object candidates priority.
(
ObjectCandidate(_) | ProjectionCandidate(_),
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
| TraitAliasCandidate(..),
) => true,
(
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
| TraitAliasCandidate(..),
ObjectCandidate(_) | ProjectionCandidate(_),
) => false,
(&ImplCandidate(other_def), &ImplCandidate(victim_def)) => {
// See if we can toss out `victim` based on specialization.
// This requires us to know *for sure* that the `other` impl applies
// i.e., `EvaluatedToOk`.
//
// FIXME(@lcnr): Using `modulo_regions` here seems kind of scary
// to me but is required for `std` to compile, so I didn't change it
// for now.
let tcx = self.tcx();
if other.evaluation.must_apply_modulo_regions() {
if tcx.specializes((other_def, victim_def)) {
return true;
}
}
if other.evaluation.must_apply_considering_regions() {
match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
// Subtle: If the predicate we are evaluating has inference
// variables, do *not* allow discarding candidates due to
// marker trait impls.
//
// Without this restriction, we could end up accidentally
                            // constraining inference variables based on an arbitrarily
// chosen trait impl.
//
// Imagine we have the following code:
//
// ```rust
// #[marker] trait MyTrait {}
// impl MyTrait for u8 {}
// impl MyTrait for bool {}
// ```
//
// And we are evaluating the predicate `<_#0t as MyTrait>`.
//
// During selection, we will end up with one candidate for each
// impl of `MyTrait`. If we were to discard one impl in favor
// of the other, we would be left with one candidate, causing
// us to "successfully" select the predicate, unifying
// _#0t with (for example) `u8`.
//
// However, we have no reason to believe that this unification
// is correct - we've essentially just picked an arbitrary
// *possibility* for _#0t, and required that this be the *only*
// possibility.
//
// Eventually, we will either:
// 1) Unify all inference variables in the predicate through
// some other means (e.g. type-checking of a function). We will
// then be in a position to drop marker trait candidates
// without constraining inference variables (since there are
                            //    none left to constrain)
// 2) Be left with some unconstrained inference variables. We
// will then correctly report an inference error, since the
// existence of multiple marker trait impls tells us nothing
// about which one should actually apply.
!needs_infer
}
Some(_) => true,
None => false,
}
} else {
false
}
}
// Everything else is ambiguous
(
ImplCandidate(_)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
| TraitAliasCandidate(..),
ImplCandidate(_)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate { .. }
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
| TraitAliasCandidate(..),
) => false,
}
}
fn sized_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
use self::BuiltinImplConditions::{Ambiguous, None, Where};
// NOTE: binder moved to (*)
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
match self_ty.kind() {
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
| ty::Error(_) => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
ty::Tuple(tys) => Where(
obligation
.predicate
.rebind(tys.last().into_iter().map(|k| k.expect_ty()).collect()),
),
ty::Adt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(
obligation.predicate.rebind({
sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
}),
)
}
ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
ty::Infer(ty::TyVar(_)) => Ambiguous,
ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
}
}
}
fn copy_clone_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
// NOTE: binder moved to (*)
let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match *self_ty.kind() {
ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::Char
| ty::RawPtr(..)
| ty::Never
| ty::Ref(_, _, hir::Mutability::Not)
| ty::Array(..) => {
// Implementations provided in libcore
None
}
ty::Dynamic(..)
| ty::Str
| ty::Slice(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Foreign(..)
| ty::Ref(_, _, hir::Mutability::Mut) => None,
ty::Tuple(tys) => {
// (*) binder moved here
Where(obligation.predicate.rebind(tys.iter().map(|k| k.expect_ty()).collect()))
}
ty::Closure(_, substs) => {
// (*) binder moved here
let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
if let ty::Infer(ty::TyVar(_)) = ty.kind() {
// Not yet resolved.
Ambiguous
} else {
Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect()))
}
}
ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
}
}
}
/// For default impls, we need to break apart a type into its
/// "constituent types" -- meaning, the types that it contains.
///
/// Here are some (simple) examples:
///
/// ```
/// (i32, u32) -> [i32, u32]
/// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
/// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
    ///     Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
/// ```
fn constituent_types_for_ty(
&self,
t: ty::Binder<'tcx, Ty<'tcx>>,
) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
match *t.skip_binder().kind() {
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Str
| ty::Error(_)
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Never
| ty::Char => ty::Binder::dummy(Vec::new()),
ty::Placeholder(..)
| ty::Dynamic(..)
| ty::Param(..)
| ty::Foreign(..)
| ty::Projection(..)
| ty::Bound(..)
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("asked to assemble constituent types of unexpected type: {:?}", t);
}
ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
t.rebind(vec![element_ty])
}
ty::Array(element_ty, _) | ty::Slice(element_ty) => t.rebind(vec![element_ty]),
ty::Tuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
t.rebind(tys.iter().map(|k| k.expect_ty()).collect())
}
ty::Closure(_, ref substs) => {
let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
t.rebind(vec![ty])
}
ty::Generator(_, ref substs, _) => {
let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
let witness = substs.as_generator().witness();
t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
}
ty::GeneratorWitness(types) => {
debug_assert!(!types.has_escaping_bound_vars());
types.map_bound(|types| types.to_vec())
}
// For `PhantomData<T>`, we pass `T`.
ty::Adt(def, substs) if def.is_phantom_data() => t.rebind(substs.types().collect()),
ty::Adt(def, substs) => {
t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect())
}
ty::Opaque(def_id, substs) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
t.rebind(vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)])
}
}
}
fn collect_predicates_for_types(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
trait_def_id: DefId,
types: ty::Binder<'tcx, Vec<Ty<'tcx>>>,
) -> Vec<PredicateObligation<'tcx>> {
// Because the types were potentially derived from
// higher-ranked obligations they may reference late-bound
// regions. For example, `for<'a> Foo<&'a i32> : Copy` would
// yield a type like `for<'a> &'a i32`. In general, we
// maintain the invariant that we never manipulate bound
// regions, so we have to process these bound regions somehow.
//
// The strategy is to:
//
// 1. Instantiate those regions to placeholder regions (e.g.,
// `for<'a> &'a i32` becomes `&0 i32`.
// 2. Produce something like `&'0 i32 : Copy`
// 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
types
.as_ref()
.skip_binder() // binder moved -\
.iter()
.flat_map(|ty| {
let ty: ty::Binder<'tcx, Ty<'tcx>> = types.rebind(ty); // <----/
self.infcx.commit_unconditionally(|_| {
let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(ty);
let Normalized { value: normalized_ty, mut obligations } =
ensure_sufficient_stack(|| {
project::normalize_with_depth(
self,
param_env,
cause.clone(),
recursion_depth,
placeholder_ty,
)
});
let placeholder_obligation = predicate_for_trait_def(
self.tcx(),
param_env,
cause.clone(),
trait_def_id,
recursion_depth,
normalized_ty,
&[],
);
obligations.push(placeholder_obligation);
obligations
})
})
.collect()
}
///////////////////////////////////////////////////////////////////////////
// Matching
//
// Matching is a common path used for both evaluation and
// confirmation. It basically unifies types that appear in impls
// and traits. This does affect the surrounding environment;
// therefore, when used during evaluation, match routines must be
// run inside of a `probe()` so that their side-effects are
// contained.
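    //
    // A hedged sketch of what a "match" does (hypothetical types, for
    // illustration only): given the obligation `Vec<$0>: Clone`, where
    // `$0` is an inference variable, and
    //
    // ```rust
    // impl<T: Clone> Clone for Vec<T> { /* ... */ }
    // ```
    //
    // `match_impl` instantiates the impl with fresh variables (yielding
    // `Vec<$1>: Clone`) and equates the two trait refs, unifying `$0`
    // with `$1`. That unification is the side effect referred to above;
    // the impl's own `T: Clone` requirement is collected separately via
    // `impl_or_trait_obligations`.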
fn rematch_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
) -> Normalized<'tcx, SubstsRef<'tcx>> {
match self.match_impl(impl_def_id, obligation) {
Ok(substs) => substs,
Err(()) => {
bug!(
"Impl {:?} was matchable against {:?} but now is not",
impl_def_id,
obligation
);
}
}
}
#[tracing::instrument(level = "debug", skip(self))]
fn match_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// Before we create the substitutions and everything, first
// consider a "quick reject". This avoids creating more types
// and so forth that we need to.
if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
return Err(());
}
let placeholder_obligation =
self.infcx().replace_bound_vars_with_placeholders(obligation.predicate);
let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
debug!(?impl_trait_ref);
let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
ensure_sufficient_stack(|| {
project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
impl_trait_ref,
)
});
debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
let cause = ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id),
);
let InferOk { obligations, .. } = self
.infcx
.at(&cause, obligation.param_env)
.eq(placeholder_obligation_trait_ref, impl_trait_ref)
.map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
nested_obligations.extend(obligations);
if !self.intercrate
&& self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
{
debug!("match_impl: reservation impls only apply in intercrate mode");
return Err(());
}
debug!(?impl_substs, ?nested_obligations, "match_impl: success");
Ok(Normalized { value: impl_substs, obligations: nested_obligations })
}
fn fast_reject_trait_refs(
&mut self,
obligation: &TraitObligation<'_>,
impl_trait_ref: &ty::TraitRef<'_>,
) -> bool {
// We can avoid creating type variables and doing the full
// substitution if we find that any of the input types, when
// simplified, do not match.
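        //
        // A hedged example (hypothetical, for illustration only):
        //
        // ```rust
        // trait Trait {}
        // impl<T> Trait for Vec<T> {}
        // // The obligation `Option<u32>: Trait` is rejected fast, since
        // // `Option<_>` and `Vec<_>` simplify to different shapes.
        // ```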
iter::zip(obligation.predicate.skip_binder().trait_ref.substs, impl_trait_ref.substs).any(
|(obligation_arg, impl_arg)| {
match (obligation_arg.unpack(), impl_arg.unpack()) {
(GenericArgKind::Type(obligation_ty), GenericArgKind::Type(impl_ty)) => {
// Note, we simplify parameters for the obligation but not the
// impl so that we do not reject a blanket impl but do reject
// more concrete impls if we're searching for `T: Trait`.
let simplified_obligation_ty = fast_reject::simplify_type(
self.tcx(),
obligation_ty,
SimplifyParams::Yes,
StripReferences::No,
);
let simplified_impl_ty = fast_reject::simplify_type(
self.tcx(),
impl_ty,
SimplifyParams::No,
StripReferences::No,
);
simplified_obligation_ty.is_some()
&& simplified_impl_ty.is_some()
&& simplified_obligation_ty != simplified_impl_ty
}
(GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => {
// Lifetimes can never cause a rejection.
false
}
(GenericArgKind::Const(_), GenericArgKind::Const(_)) => {
// Conservatively ignore consts (i.e. assume they might
// unify later) until we have `fast_reject` support for
// them (if we'll ever need it, even).
false
}
_ => unreachable!(),
}
},
)
}
/// Normalize `where_clause_trait_ref` and try to match it against
/// `obligation`. If successful, return any predicates that
/// result from the normalization.
fn match_where_clause_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
self.match_poly_trait_ref(obligation, where_clause_trait_ref)
}
/// Returns `Ok` if `poly_trait_ref` being true implies that the
/// obligation is satisfied.
#[instrument(skip(self), level = "debug")]
fn match_poly_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
.map(|InferOk { obligations, .. }| obligations)
.map_err(|_| ())
}
///////////////////////////////////////////////////////////////////////////
// Miscellany
fn match_fresh_trait_refs(
&self,
previous: ty::PolyTraitPredicate<'tcx>,
current: ty::PolyTraitPredicate<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> bool {
let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
matcher.relate(previous, current).is_ok()
}
fn push_stack<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
obligation: &'o TraitObligation<'tcx>,
) -> TraitObligationStack<'o, 'tcx> {
let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener);
let dfn = previous_stack.cache.next_dfn();
let depth = previous_stack.depth() + 1;
TraitObligationStack {
obligation,
fresh_trait_pred,
reached_depth: Cell::new(depth),
previous: previous_stack,
dfn,
depth,
}
}
#[instrument(skip(self), level = "debug")]
fn closure_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
substs: SubstsRef<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
let closure_sig = substs.as_closure().sig();
debug!(?closure_sig);
// (1) Feels icky to skip the binder here, but OTOH we know
// that the self-type is an unboxed closure type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
closure_trait_ref_and_return_type(
self.tcx(),
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
closure_sig,
util::TupleArgumentsFlag::No,
)
.map_bound(|(trait_ref, _)| trait_ref)
}
fn generator_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
substs: SubstsRef<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
let gen_sig = substs.as_generator().poly_sig();
// (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is a generator type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
super::util::generator_trait_ref_and_outputs(
self.tcx(),
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
gen_sig,
)
.map_bound(|(trait_ref, ..)| trait_ref)
}
/// Returns the obligations that are implied by instantiating an
/// impl or trait. The obligations are substituted and fully
/// normalized. This is used when confirming an impl or default
/// impl.
#[tracing::instrument(level = "debug", skip(self, cause, param_env))]
fn impl_or_trait_obligations(
&mut self,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId, // of impl or trait
substs: SubstsRef<'tcx>, // for impl or trait
) -> Vec<PredicateObligation<'tcx>> {
let tcx = self.tcx();
// To allow for one-pass evaluation of the nested obligation,
// each predicate must be preceded by the obligations required
// to normalize it.
// for example, if we have:
// impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
// the impl will have the following predicates:
// <V as Iterator>::Item = U,
// U: Iterator, U: Sized,
// V: Iterator, V: Sized,
// <U as Iterator>::Item: Copy
// When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
// obligation will normalize to `<$0 as Iterator>::Item = $1` and
// `$1: Copy`, so we must ensure the obligations are emitted in
// that order.
let predicates = tcx.predicates_of(def_id);
debug!(?predicates);
assert_eq!(predicates.parent, None);
let mut obligations = Vec::with_capacity(predicates.predicates.len());
for (predicate, _) in predicates.predicates {
debug!(?predicate);
let predicate = normalize_with_depth_to(
self,
param_env,
cause.clone(),
recursion_depth,
predicate.subst(tcx, substs),
&mut obligations,
);
obligations.push(Obligation {
cause: cause.clone(),
recursion_depth,
param_env,
predicate,
});
}
// We are performing deduplication here to avoid exponential blowups
// (#38528) from happening, but the real cause of the duplication is
// unknown. What we know is that the deduplication avoids exponential
// amount of predicates being propagated when processing deeply nested
// types.
//
// This code is hot enough that it's worth avoiding the allocation
// required for the FxHashSet when possible. Special-casing lengths 0,
// 1 and 2 covers roughly 75-80% of the cases.
if obligations.len() <= 1 {
// No possibility of duplicates.
} else if obligations.len() == 2 {
// Only two elements. Drop the second if they are equal.
if obligations[0] == obligations[1] {
obligations.truncate(1);
}
} else {
// Three or more elements. Use a general deduplication process.
let mut seen = FxHashSet::default();
obligations.retain(|i| seen.insert(i.clone()));
}
obligations
}
}
trait TraitObligationExt<'tcx> {
fn derived_cause(
&self,
variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
) -> ObligationCause<'tcx>;
}
impl<'tcx> TraitObligationExt<'tcx> for TraitObligation<'tcx> {
fn derived_cause(
&self,
variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
) -> ObligationCause<'tcx> {
/*!
* Creates a cause for obligations that are derived from
* `obligation` by a recursive search (e.g., for a builtin
         * bound, or eventually an `auto trait Foo`). If `obligation`
* is itself a derived obligation, this is just a clone, but
* otherwise we create a "derived obligation" cause so as to
* keep track of the original root obligation for error
* reporting.
*/
let obligation = self;
// NOTE(flaper87): As of now, it keeps track of the whole error
// chain. Ideally, we should have a way to configure this either
// by using -Z verbose or just a CLI argument.
let derived_cause = DerivedObligationCause {
parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
parent_code: obligation.cause.clone_code(),
};
let derived_code = variant(derived_cause);
ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
}
}
impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList::with(self)
}
fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
self.previous.cache
}
fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
self.list()
}
/// Indicates that attempting to evaluate this stack entry
/// required accessing something from the stack at depth `reached_depth`.
fn update_reached_depth(&self, reached_depth: usize) {
assert!(
self.depth >= reached_depth,
"invoked `update_reached_depth` with something under this stack: \
self.depth={} reached_depth={}",
self.depth,
reached_depth,
);
debug!(reached_depth, "update_reached_depth");
let mut p = self;
while reached_depth < p.depth {
debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant");
p.reached_depth.set(p.reached_depth.get().min(reached_depth));
p = p.previous.head.unwrap();
}
}
}
/// The "provisional evaluation cache" is used to store intermediate cache results
/// when solving auto traits. Auto traits are unusual in that they can support
/// cycles. So, for example, a "proof tree" like this would be ok:
///
/// - `Foo<T>: Send` :-
/// - `Bar<T>: Send` :-
/// - `Foo<T>: Send` -- cycle, but ok
/// - `Baz<T>: Send`
///
/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
/// For non-auto traits, this cycle would be an error, but for auto traits (because
/// they are coinductive) it is considered ok.
///
/// However, there is a complication: at the point where we have
/// "proven" `Bar<T>: Send`, we have in fact only proven it
/// *provisionally*. In particular, we proved that `Bar<T>: Send`
/// *under the assumption* that `Foo<T>: Send`. But what if we later
/// find out this assumption is wrong? Specifically, we could
/// encounter some kind of error proving `Baz<T>: Send`. In that case,
/// `Bar<T>: Send` didn't turn out to be true.
///
/// In Issue #60010, we found a bug in rustc where it would cache
/// these intermediate results. This was fixed in #60444 by disabling
/// *all* caching for things involved in a cycle -- in our example,
/// that would mean we don't cache that `Bar<T>: Send`. But this led
/// to large slowdowns.
///
/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
/// first requires proving `Bar<T>: Send` (which is true):
///
/// - `Foo<T>: Send` :-
/// - `Bar<T>: Send` :-
/// - `Foo<T>: Send` -- cycle, but ok
/// - `Baz<T>: Send`
/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
/// - `*const T: Send` -- but what if we later encounter an error?
///
/// The *provisional evaluation cache* resolves this issue. It stores
/// cache results that we've proven but which were involved in a cycle
/// in some way. We track the minimal stack depth (i.e., the
/// farthest from the top of the stack) that we are dependent on.
/// The idea is that the cache results within are all valid -- so long as
/// none of the nodes in between the current node and the node at that minimum
/// depth result in an error (in which case the cached results are just thrown away).
///
/// During evaluation, we consult this provisional cache and rely on
/// it. Accessing a cached value is considered equivalent to accessing
/// a result at `reached_depth`, so it marks the *current* solution as
/// provisional as well. If an error is encountered, we toss out any
/// provisional results added from the subtree that encountered the
/// error. When we pop the node at `reached_depth` from the stack, we
/// can commit all the things that remain in the provisional cache.
struct ProvisionalEvaluationCache<'tcx> {
/// next "depth first number" to issue -- just a counter
dfn: Cell<usize>,
/// Map from cache key to the provisionally evaluated thing.
/// The cache entries contain the result but also the DFN in which they
/// were added. The DFN is used to clear out values on failure.
///
/// Imagine we have a stack like:
///
/// - `A B C` and we add a cache for the result of C (DFN 2)
/// - Then we have a stack `A B D` where `D` has DFN 3
/// - We try to solve D by evaluating E: `A B D E` (DFN 4)
    ///   - `E` generates various cache entries which have cyclic dependencies on `B`
/// - `A B D E F` and so forth
/// - the DFN of `F` for example would be 5
/// - then we determine that `E` is in error -- we will then clear
/// all cache values whose DFN is >= 4 -- in this case, that
/// means the cached value for `F`.
map: RefCell<FxHashMap<ty::PolyTraitPredicate<'tcx>, ProvisionalEvaluation>>,
}
/// A cache value for the provisional cache: contains the depth-first
/// number (DFN) and result.
#[derive(Copy, Clone, Debug)]
struct ProvisionalEvaluation {
from_dfn: usize,
reached_depth: usize,
result: EvaluationResult,
/// The `DepNodeIndex` created for the `evaluate_stack` call for this provisional
/// evaluation. When we create an entry in the evaluation cache using this provisional
/// cache entry (see `on_completion`), we use this `dep_node` to ensure that future reads from
/// the cache will have all of the necessary incr comp dependencies tracked.
dep_node: DepNodeIndex,
}
impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
fn default() -> Self {
Self { dfn: Cell::new(0), map: Default::default() }
}
}
impl<'tcx> ProvisionalEvaluationCache<'tcx> {
/// Get the next DFN in sequence (basically a counter).
fn next_dfn(&self) -> usize {
let result = self.dfn.get();
self.dfn.set(result + 1);
result
}
/// Check the provisional cache for any result for
    /// `fresh_trait_pred`. If there is a hit, then you must consider
/// it an access to the stack slots at depth
/// `reached_depth` (from the returned value).
fn get_provisional(
&self,
fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> Option<ProvisionalEvaluation> {
debug!(
?fresh_trait_pred,
"get_provisional = {:#?}",
self.map.borrow().get(&fresh_trait_pred),
);
Some(*self.map.borrow().get(&fresh_trait_pred)?)
}
/// Insert a provisional result into the cache. The result came
/// from the node with the given DFN. It accessed a minimum depth
/// of `reached_depth` to compute. It evaluated `fresh_trait_pred`
/// and resulted in `result`.
fn insert_provisional(
&self,
from_dfn: usize,
reached_depth: usize,
fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
result: EvaluationResult,
dep_node: DepNodeIndex,
) {
debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
let mut map = self.map.borrow_mut();
// Subtle: when we complete working on the DFN `from_dfn`, anything
// that remains in the provisional cache must be dependent on some older
// stack entry than `from_dfn`. We have to update their depth with our transitive
        // depth in that case or else it would be referring to some popped node.
//
// Example:
// A (reached depth 0)
// ...
// B // depth 1 -- reached depth = 0
// C // depth 2 -- reached depth = 1 (should be 0)
// B
// A // depth 0
// D (reached depth 1)
// C (cache -- reached depth = 2)
for (_k, v) in &mut *map {
if v.from_dfn >= from_dfn {
v.reached_depth = reached_depth.min(v.reached_depth);
}
}
map.insert(
fresh_trait_pred,
ProvisionalEvaluation { from_dfn, reached_depth, result, dep_node },
);
}
/// Invoked when the node with dfn `dfn` does not get a successful
/// result. This will clear out any provisional cache entries
/// that were added since `dfn` was created. This is because the
/// provisional entries are things which must assume that the
/// things on the stack at the time of their creation succeeded --
/// since the failing node is presently at the top of the stack,
/// these provisional entries must either depend on it or some
/// ancestor of it.
fn on_failure(&self, dfn: usize) {
debug!(?dfn, "on_failure");
self.map.borrow_mut().retain(|key, eval| {
            if eval.from_dfn >= dfn {
debug!("on_failure: removing {:?}", key);
false
} else {
true
}
});
}
/// Invoked when the node at depth `depth` completed without
/// depending on anything higher in the stack (if that completion
/// was a failure, then `on_failure` should have been invoked
/// already). The callback `op` will be invoked for each
/// provisional entry that we can now confirm.
///
/// Note that we may still have provisional cache items remaining
/// in the cache when this is done. For example, if there is a
/// cycle:
///
/// * A depends on...
/// * B depends on A
/// * C depends on...
/// * D depends on C
/// * ...
///
/// Then as we complete the C node we will have a provisional cache
/// with results for A, B, C, and D. This method would clear out
/// the C and D results, but leave A and B provisional.
///
/// This is determined based on the DFN: we remove any provisional
/// results created since `dfn` started (e.g., in our example, dfn
/// would be 2, representing the C node, and hence we would
/// remove the result for D, which has DFN 3, but not the results for
/// A and B, which have DFNs 0 and 1 respectively).
fn on_completion(
&self,
dfn: usize,
mut op: impl FnMut(ty::PolyTraitPredicate<'tcx>, EvaluationResult, DepNodeIndex),
) {
debug!(?dfn, "on_completion");
for (fresh_trait_pred, eval) in
self.map.borrow_mut().drain_filter(|_k, eval| eval.from_dfn >= dfn)
{
debug!(?fresh_trait_pred, ?eval, "on_completion");
op(fresh_trait_pred, eval.result, eval.dep_node);
}
}
}
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o, 'tcx> {
cache: &'o ProvisionalEvaluationCache<'tcx>,
head: Option<&'o TraitObligationStack<'o, 'tcx>>,
}
impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { cache, head: None }
}
fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { cache: r.cache(), head: Some(r) }
}
fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
self.head
}
fn depth(&self) -> usize {
if let Some(head) = self.head { head.depth } else { 0 }
}
}
impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
type Item = &'o TraitObligationStack<'o, 'tcx>;
fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
let o = self.head?;
*self = o.previous;
Some(o)
}
}
impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "TraitObligationStack({:?})", self.obligation)
}
}
| 41.981979 | 109 | 0.546382 |
fcf48f8a69db2d50f94456f042227c6ecf918fba | 3,240 | use core::slice;
use std::ffi::CStr;
use std::str::from_utf8_unchecked;
use libsqlite3_sys::{
sqlite3_column_blob, sqlite3_column_bytes, sqlite3_column_double, sqlite3_column_int,
sqlite3_column_int64, sqlite3_column_text, sqlite3_column_type, SQLITE_BLOB, SQLITE_FLOAT,
SQLITE_INTEGER, SQLITE_NULL, SQLITE_TEXT,
};
use crate::sqlite::statement::Statement;
use crate::sqlite::type_info::SqliteType;
use crate::sqlite::{Sqlite, SqliteTypeInfo};
use crate::value::RawValue;
pub struct SqliteValue<'c> {
pub(super) index: i32,
pub(super) statement: &'c Statement,
}
// https://www.sqlite.org/c3ref/column_blob.html
// https://www.sqlite.org/capi3ref.html#sqlite3_column_blob
// These routines return information about a single column of the current result row of a query.
impl<'c> SqliteValue<'c> {
    /// Returns true if the value should be interpreted as NULL.
pub(super) fn is_null(&self) -> bool {
self.r#type().is_none()
}
fn r#type(&self) -> Option<SqliteType> {
let type_code = unsafe { sqlite3_column_type(self.statement.handle(), self.index) };
// SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL
match type_code {
SQLITE_INTEGER => Some(SqliteType::Integer),
SQLITE_FLOAT => Some(SqliteType::Float),
SQLITE_TEXT => Some(SqliteType::Text),
SQLITE_BLOB => Some(SqliteType::Blob),
SQLITE_NULL => None,
_ => unreachable!("received unexpected column type: {}", type_code),
}
}
/// Returns the 32-bit INTEGER result.
pub(super) fn int(&self) -> i32 {
unsafe { sqlite3_column_int(self.statement.handle(), self.index) }
}
/// Returns the 64-bit INTEGER result.
pub(super) fn int64(&self) -> i64 {
unsafe { sqlite3_column_int64(self.statement.handle(), self.index) }
}
/// Returns the 64-bit, REAL result.
pub(super) fn double(&self) -> f64 {
unsafe { sqlite3_column_double(self.statement.handle(), self.index) }
}
/// Returns the UTF-8 TEXT result.
pub(super) fn text(&self) -> Option<&'c str> {
unsafe {
let ptr = sqlite3_column_text(self.statement.handle(), self.index) as *const i8;
if ptr.is_null() {
None
} else {
Some(from_utf8_unchecked(CStr::from_ptr(ptr).to_bytes()))
}
}
}
fn bytes(&self) -> usize {
// Returns the size of the result in bytes.
let len = unsafe { sqlite3_column_bytes(self.statement.handle(), self.index) };
len as usize
}
/// Returns the BLOB result.
pub(super) fn blob(&self) -> &'c [u8] {
let ptr = unsafe { sqlite3_column_blob(self.statement.handle(), self.index) };
if ptr.is_null() {
// Empty BLOBs are received as null pointers
return &[];
}
unsafe { slice::from_raw_parts(ptr as *const u8, self.bytes()) }
}
}
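// A minimal usage sketch (hypothetical; `statement` is assumed to be a
// prepared `Statement` positioned on a result row, and the construction
// below only compiles within this module because the fields and some
// accessors are `pub(super)` or private):
//
// ```rust,ignore
// let value = SqliteValue { index: 0, statement: &statement };
// if !value.is_null() {
//     match value.r#type() {
//         Some(SqliteType::Integer) => println!("{}", value.int64()),
//         Some(SqliteType::Float) => println!("{}", value.double()),
//         Some(SqliteType::Text) => println!("{:?}", value.text()),
//         Some(SqliteType::Blob) => println!("{} bytes", value.blob().len()),
//         None => unreachable!("guarded by `is_null` above"),
//     }
// }
// ```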
impl<'c> RawValue<'c> for SqliteValue<'c> {
type Database = Sqlite;
fn type_info(&self) -> Option<SqliteTypeInfo> {
Some(SqliteTypeInfo {
r#type: self.r#type()?,
affinity: None,
})
}
}
| 30.857143 | 96 | 0.619136 |
0e86e7047b5dc95c824ea208eaa121e302d28784 | 39,691 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`GenerateDataSetInput`](crate::input::GenerateDataSetInput)
pub mod generate_data_set_input {
/// A builder for [`GenerateDataSetInput`](crate::input::GenerateDataSetInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_set_type: std::option::Option<crate::model::DataSetType>,
pub(crate) data_set_publication_date: std::option::Option<smithy_types::Instant>,
pub(crate) role_name_arn: std::option::Option<std::string::String>,
pub(crate) destination_s3_bucket_name: std::option::Option<std::string::String>,
pub(crate) destination_s3_prefix: std::option::Option<std::string::String>,
pub(crate) sns_topic_arn: std::option::Option<std::string::String>,
pub(crate) customer_defined_values: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>The desired data set type.</p>
/// <p>
/// <ul>
/// <li>
/// <strong>customer_subscriber_hourly_monthly_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>customer_subscriber_annual_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_usage_by_instance_type</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_fees</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_free_trial_conversions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_new_instances</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_new_product_subscribers</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_canceled_product_subscribers</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_billing_and_revenue_data</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_annual_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_field_demonstration_usage</strong>
/// <p>From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_flexible_payment_schedule</strong>
/// <p>From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_product</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_instance_hours</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_customer_geo</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_uncollected_funds</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_disbursed_funds</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_past_due_funds</strong>
/// <p>From 2018-04-07 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_uncollected_funds_breakdown</strong>
/// <p>From 2019-10-04 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>sales_compensation_billed_revenue</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.</p>
/// </li>
/// <li>
/// <strong>us_sales_and_use_tax_records</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_product_with_uncollected_funds</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_industry</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_revenue</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_geography</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// </ul>
/// </p>
pub fn data_set_type(mut self, input: crate::model::DataSetType) -> Self {
self.data_set_type = Some(input);
self
}
pub fn set_data_set_type(
mut self,
input: std::option::Option<crate::model::DataSetType>,
) -> Self {
self.data_set_type = input;
self
}
/// The date a data set was published.
/// For daily data sets, provide a date with day-level granularity for the desired day.
/// For monthly data sets except those with prefix disbursed_amount, provide a date with month-level granularity for the desired month (the day value will be ignored).
/// For data sets with prefix disbursed_amount, provide a date with day-level granularity for the desired day. For these data sets we will look backwards in time over the range of 31 days until the first data set is found (the latest one).
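        ///
        /// For example (illustrative dates only): for a daily data set,
        /// 2021-07-14 requests the file published for July 14th; for a
        /// monthly data set, any day in July 2021 requests the July file,
        /// since the day value is ignored.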
pub fn data_set_publication_date(mut self, input: smithy_types::Instant) -> Self {
self.data_set_publication_date = Some(input);
self
}
pub fn set_data_set_publication_date(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.data_set_publication_date = input;
self
}
/// The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided
/// AWS services.
pub fn role_name_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.role_name_arn = Some(input.into());
self
}
pub fn set_role_name_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.role_name_arn = input;
self
}
/// The name (friendly name, not ARN) of the destination S3 bucket.
pub fn destination_s3_bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
self.destination_s3_bucket_name = Some(input.into());
self
}
pub fn set_destination_s3_bucket_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.destination_s3_bucket_name = input;
self
}
/// (Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems.
/// For example, if given the bucket name "mybucket" and the prefix "myprefix/mydatasets", the output file
/// "outputfile" would be published to "s3://mybucket/myprefix/mydatasets/outputfile".
/// If the prefix directory structure does not exist, it will be created.
/// If no prefix is provided, the data set will be published to the S3 bucket root.
pub fn destination_s3_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.destination_s3_prefix = Some(input.into());
self
}
pub fn set_destination_s3_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.destination_s3_prefix = input;
self
}
/// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an
/// error has occurred.
pub fn sns_topic_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.sns_topic_arn = Some(input.into());
self
}
pub fn set_sns_topic_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.sns_topic_arn = input;
self
}
pub fn customer_defined_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.customer_defined_values.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.customer_defined_values = Some(hash_map);
self
}
pub fn set_customer_defined_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.customer_defined_values = input;
self
}
/// Consumes the builder and constructs a [`GenerateDataSetInput`](crate::input::GenerateDataSetInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GenerateDataSetInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::GenerateDataSetInput {
data_set_type: self.data_set_type,
data_set_publication_date: self.data_set_publication_date,
role_name_arn: self.role_name_arn,
destination_s3_bucket_name: self.destination_s3_bucket_name,
destination_s3_prefix: self.destination_s3_prefix,
sns_topic_arn: self.sns_topic_arn,
customer_defined_values: self.customer_defined_values,
})
}
}
}
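// A hedged usage sketch for the builder above. All argument values are
// hypothetical placeholders; the `DataSetType` variant name and the
// `Instant` constructor are assumptions about what the code generator
// and `smithy_types` provide:
//
// ```rust,ignore
// let input = crate::input::GenerateDataSetInput::builder()
//     .data_set_type(crate::model::DataSetType::CustomerSubscriberHourlyMonthlySubscriptions)
//     .data_set_publication_date(smithy_types::Instant::from_epoch_seconds(1_609_459_200))
//     .role_name_arn("arn:aws:iam::123456789012:role/ExampleRole")
//     .destination_s3_bucket_name("example-bucket")
//     .destination_s3_prefix("myprefix/mydatasets")
//     .sns_topic_arn("arn:aws:sns:us-east-1:123456789012:example-topic")
//     .build()?;
// ```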
#[doc(hidden)]
pub type GenerateDataSetInputOperationOutputAlias = crate::operation::GenerateDataSet;
#[doc(hidden)]
pub type GenerateDataSetInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GenerateDataSetInput {
/// Consumes the builder and constructs an Operation<[`GenerateDataSet`](crate::operation::GenerateDataSet)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GenerateDataSet,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let properties = smithy_http::property_bag::SharedPropertyBag::new();
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_crate_operation_generate_data_set(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = smithy_http::operation::Request::from_parts(
request.map(smithy_http::body::SdkBody::from),
properties,
);
request.properties_mut().insert(
aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
),
);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GenerateDataSet::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GenerateDataSet",
"marketplacecommerceanalytics",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut builder = self.update_http_builder(http::request::Builder::new())?;
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.1",
);
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"MarketplaceCommerceAnalytics20150701.GenerateDataSet",
);
Ok(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GenerateDataSetInput`](crate::input::GenerateDataSetInput)
pub fn builder() -> crate::input::generate_data_set_input::Builder {
crate::input::generate_data_set_input::Builder::default()
}
}
/// See [`StartSupportDataExportInput`](crate::input::StartSupportDataExportInput)
pub mod start_support_data_export_input {
/// A builder for [`StartSupportDataExportInput`](crate::input::StartSupportDataExportInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_set_type: std::option::Option<crate::model::SupportDataSetType>,
pub(crate) from_date: std::option::Option<smithy_types::Instant>,
pub(crate) role_name_arn: std::option::Option<std::string::String>,
pub(crate) destination_s3_bucket_name: std::option::Option<std::string::String>,
pub(crate) destination_s3_prefix: std::option::Option<std::string::String>,
pub(crate) sns_topic_arn: std::option::Option<std::string::String>,
pub(crate) customer_defined_values: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>
/// Specifies the data set type to be written to the output csv file. The data set types customer_support_contacts_data and
/// test_customer_support_contacts_data both result in a csv file containing the following fields: Product Id, Product Code, Customer Guid,
/// Subscription Guid, Subscription Start Date, Organization, AWS Account Id, Given Name, Surname, Telephone Number, Email, Title,
/// Country Code, ZIP Code, Operation Type, and Operation Time.
/// </p>
/// <p>
/// <ul>
/// <li><i>customer_support_contacts_data</i> Customer support contact data. The data set will contain all changes (Creates, Updates, and Deletes) to customer support contact data from the date specified in the from_date parameter.</li>
/// <li><i>test_customer_support_contacts_data</i> An example data set containing static test data in the same format as customer_support_contacts_data</li>
/// </ul>
/// </p>
pub fn data_set_type(mut self, input: crate::model::SupportDataSetType) -> Self {
self.data_set_type = Some(input);
self
}
pub fn set_data_set_type(
mut self,
input: std::option::Option<crate::model::SupportDataSetType>,
) -> Self {
self.data_set_type = input;
self
}
/// The start date from which to retrieve the data set in UTC. This parameter only affects the customer_support_contacts_data data set type.
pub fn from_date(mut self, input: smithy_types::Instant) -> Self {
self.from_date = Some(input);
self
}
pub fn set_from_date(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.from_date = input;
self
}
/// The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided
/// AWS services.
pub fn role_name_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.role_name_arn = Some(input.into());
self
}
pub fn set_role_name_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.role_name_arn = input;
self
}
/// The name (friendly name, not ARN) of the destination S3 bucket.
pub fn destination_s3_bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
self.destination_s3_bucket_name = Some(input.into());
self
}
pub fn set_destination_s3_bucket_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.destination_s3_bucket_name = input;
self
}
/// (Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems.
/// For example, if given the bucket name "mybucket" and the prefix "myprefix/mydatasets", the output file
/// "outputfile" would be published to "s3://mybucket/myprefix/mydatasets/outputfile".
/// If the prefix directory structure does not exist, it will be created.
/// If no prefix is provided, the data set will be published to the S3 bucket root.
pub fn destination_s3_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.destination_s3_prefix = Some(input.into());
self
}
pub fn set_destination_s3_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.destination_s3_prefix = input;
self
}
/// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an
/// error has occurred.
pub fn sns_topic_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.sns_topic_arn = Some(input.into());
self
}
pub fn set_sns_topic_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.sns_topic_arn = input;
self
}
pub fn customer_defined_values(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.customer_defined_values.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.customer_defined_values = Some(hash_map);
self
}
pub fn set_customer_defined_values(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.customer_defined_values = input;
self
}
/// Consumes the builder and constructs a [`StartSupportDataExportInput`](crate::input::StartSupportDataExportInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::StartSupportDataExportInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::StartSupportDataExportInput {
data_set_type: self.data_set_type,
from_date: self.from_date,
role_name_arn: self.role_name_arn,
destination_s3_bucket_name: self.destination_s3_bucket_name,
destination_s3_prefix: self.destination_s3_prefix,
sns_topic_arn: self.sns_topic_arn,
customer_defined_values: self.customer_defined_values,
})
}
}
}
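// Usage sketch (not generated code; the ARNs, bucket, and prefix below are
// placeholders, and the enum variant name follows the usual smithy naming
// convention rather than being confirmed here):
//
// let input = crate::input::StartSupportDataExportInput::builder()
//     .data_set_type(crate::model::SupportDataSetType::CustomerSupportContactsData)
//     .from_date(smithy_types::Instant::from_epoch_seconds(1_577_836_800))
//     .role_name_arn("arn:aws:iam::123456789012:role/MarketplaceRole")
//     .destination_s3_bucket_name("mybucket")
//     .destination_s3_prefix("myprefix/mydatasets")
//     .sns_topic_arn("arn:aws:sns:us-east-1:123456789012:mytopic")
//     .build()?;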
#[doc(hidden)]
pub type StartSupportDataExportInputOperationOutputAlias = crate::operation::StartSupportDataExport;
#[doc(hidden)]
pub type StartSupportDataExportInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl StartSupportDataExportInput {
/// Consumes the builder and constructs an Operation<[`StartSupportDataExport`](crate::operation::StartSupportDataExport)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::StartSupportDataExport,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let properties = smithy_http::property_bag::SharedPropertyBag::new();
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_crate_operation_start_support_data_export(&self).map_err(|err|smithy_http::operation::BuildError::SerializationError(err.into()))?
;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request = smithy_http::operation::Request::from_parts(
request.map(smithy_http::body::SdkBody::from),
properties,
);
request.properties_mut().insert(
aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
),
);
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::StartSupportDataExport::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"StartSupportDataExport",
"marketplacecommerceanalytics",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut builder = self.update_http_builder(http::request::Builder::new())?;
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("content-type"),
"application/x-amz-json-1.1",
);
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"MarketplaceCommerceAnalytics20150701.StartSupportDataExport",
);
Ok(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = smithy_http::header::set_header_if_absent(
builder,
http::header::CONTENT_LENGTH,
content_length,
);
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`StartSupportDataExportInput`](crate::input::StartSupportDataExportInput)
pub fn builder() -> crate::input::start_support_data_export_input::Builder {
crate::input::start_support_data_export_input::Builder::default()
}
}
/// Container for the parameters to the StartSupportDataExport operation.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartSupportDataExportInput {
/// <p>
/// Specifies the data set type to be written to the output csv file. The data set types customer_support_contacts_data and
/// test_customer_support_contacts_data both result in a csv file containing the following fields: Product Id, Product Code, Customer Guid,
/// Subscription Guid, Subscription Start Date, Organization, AWS Account Id, Given Name, Surname, Telephone Number, Email, Title,
/// Country Code, ZIP Code, Operation Type, and Operation Time.
/// </p>
/// <p>
/// <ul>
/// <li><i>customer_support_contacts_data</i> Customer support contact data. The data set will contain all changes (Creates, Updates, and Deletes) to customer support contact data from the date specified in the from_date parameter.</li>
/// <li><i>test_customer_support_contacts_data</i> An example data set containing static test data in the same format as customer_support_contacts_data</li>
/// </ul>
/// </p>
pub data_set_type: std::option::Option<crate::model::SupportDataSetType>,
/// The start date from which to retrieve the data set in UTC. This parameter only affects the customer_support_contacts_data data set type.
pub from_date: std::option::Option<smithy_types::Instant>,
/// The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided
/// AWS services.
pub role_name_arn: std::option::Option<std::string::String>,
/// The name (friendly name, not ARN) of the destination S3 bucket.
pub destination_s3_bucket_name: std::option::Option<std::string::String>,
/// (Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems.
/// For example, if given the bucket name "mybucket" and the prefix "myprefix/mydatasets", the output file
/// "outputfile" would be published to "s3://mybucket/myprefix/mydatasets/outputfile".
/// If the prefix directory structure does not exist, it will be created.
/// If no prefix is provided, the data set will be published to the S3 bucket root.
pub destination_s3_prefix: std::option::Option<std::string::String>,
/// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an
/// error has occurred.
pub sns_topic_arn: std::option::Option<std::string::String>,
/// (Optional) Key-value pairs which will be returned, unmodified, in the
/// Amazon SNS notification message and the data set metadata file.
pub customer_defined_values:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl std::fmt::Debug for StartSupportDataExportInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StartSupportDataExportInput");
formatter.field("data_set_type", &self.data_set_type);
formatter.field("from_date", &self.from_date);
formatter.field("role_name_arn", &self.role_name_arn);
formatter.field(
"destination_s3_bucket_name",
&self.destination_s3_bucket_name,
);
formatter.field("destination_s3_prefix", &self.destination_s3_prefix);
formatter.field("sns_topic_arn", &self.sns_topic_arn);
formatter.field("customer_defined_values", &self.customer_defined_values);
formatter.finish()
}
}
/// Container for the parameters to the GenerateDataSet operation.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GenerateDataSetInput {
/// <p>The desired data set type.</p>
/// <p>
/// <ul>
/// <li>
/// <strong>customer_subscriber_hourly_monthly_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>customer_subscriber_annual_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_usage_by_instance_type</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_fees</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_free_trial_conversions</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_new_instances</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_new_product_subscribers</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>daily_business_canceled_product_subscribers</strong>
/// <p>From 2017-09-15 to present: Available daily by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_billing_and_revenue_data</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_annual_subscriptions</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_field_demonstration_usage</strong>
/// <p>From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>monthly_revenue_flexible_payment_schedule</strong>
/// <p>From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_product</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_instance_hours</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_customer_geo</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_uncollected_funds</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_disbursed_funds</strong>
/// <p>From 2017-09-15 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_age_of_past_due_funds</strong>
/// <p>From 2018-04-07 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_uncollected_funds_breakdown</strong>
/// <p>From 2019-10-04 to present: Available every 30 days by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>sales_compensation_billed_revenue</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.</p>
/// </li>
/// <li>
/// <strong>us_sales_and_use_tax_records</strong>
/// <p>From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.</p>
/// </li>
/// <li>
/// <strong>disbursed_amount_by_product_with_uncollected_funds</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_industry</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_revenue</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// <li>
/// <strong>customer_profile_by_geography</strong>
/// <p>This data set is deprecated. Download related reports from AMMP instead!</p>
/// </li>
/// </ul>
/// </p>
pub data_set_type: std::option::Option<crate::model::DataSetType>,
/// The date a data set was published.
/// For daily data sets, provide a date with day-level granularity for the desired day.
/// For monthly data sets except those with prefix disbursed_amount, provide a date with month-level granularity for the desired month (the day value will be ignored).
/// For data sets with prefix disbursed_amount, provide a date with day-level granularity for the desired day. For these data sets we will look backwards in time over the range of 31 days until the first data set is found (the latest one).
pub data_set_publication_date: std::option::Option<smithy_types::Instant>,
/// The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided
/// AWS services.
pub role_name_arn: std::option::Option<std::string::String>,
/// The name (friendly name, not ARN) of the destination S3 bucket.
pub destination_s3_bucket_name: std::option::Option<std::string::String>,
/// (Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems.
/// For example, if given the bucket name "mybucket" and the prefix "myprefix/mydatasets", the output file
/// "outputfile" would be published to "s3://mybucket/myprefix/mydatasets/outputfile".
/// If the prefix directory structure does not exist, it will be created.
/// If no prefix is provided, the data set will be published to the S3 bucket root.
pub destination_s3_prefix: std::option::Option<std::string::String>,
/// Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an
/// error has occurred.
pub sns_topic_arn: std::option::Option<std::string::String>,
/// (Optional) Key-value pairs which will be returned, unmodified, in the
/// Amazon SNS notification message and the data set metadata file. These
    /// key-value pairs can be used to correlate responses with tracking
/// information from other systems.
pub customer_defined_values:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
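// Granularity sketch (hypothetical dates): for the monthly data set
// monthly_revenue_billing_and_revenue_data, a data_set_publication_date of
// 2020-03-15 selects the March 2020 report (the day component is ignored);
// for disbursed_amount_by_product the exact day matters, and the service
// looks back up to 31 days from it for the latest available report.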
impl std::fmt::Debug for GenerateDataSetInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GenerateDataSetInput");
formatter.field("data_set_type", &self.data_set_type);
formatter.field("data_set_publication_date", &self.data_set_publication_date);
formatter.field("role_name_arn", &self.role_name_arn);
formatter.field(
"destination_s3_bucket_name",
&self.destination_s3_bucket_name,
);
formatter.field("destination_s3_prefix", &self.destination_s3_prefix);
formatter.field("sns_topic_arn", &self.sns_topic_arn);
formatter.field("customer_defined_values", &self.customer_defined_values);
formatter.finish()
}
}
| 48.344702 | 247 | 0.621249 |
d5c888be8d5fe20cd8d69b3c46c938ecf2727006 | 2,616 | // Copyright takubokudori.
// This source code is licensed under the MIT or Apache-2.0 license.
#![allow(clippy::missing_safety_doc)]
use crate::{
raw::{
RtlInitAnsiString, RtlInitUnicodeString, ANSI_STRING, UNICODE_STRING,
},
AStr, WStr,
};
use core::ops;
/// Represents [UNICODE_STRING](https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string).
#[repr(C)]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct UnicodeString<'a> {
us: UNICODE_STRING,
s: &'a WStr,
}
impl<'a> UnicodeString<'a> {
/// Creates UnicodeString.
pub fn new(s: &'a WStr) -> Self {
let mut us = UNICODE_STRING {
Length: 0,
MaximumLength: 0,
            Buffer: core::ptr::null_mut(),
};
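        // SAFETY: `RtlInitUnicodeString` reads `s` as a NUL-terminated wide
        // string; `WStr` is assumed to uphold that invariant, and the `'a`
        // lifetime keeps the buffer alive for as long as this wrapper.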
unsafe {
RtlInitUnicodeString(&mut us, s.as_ptr());
}
Self { us, s }
}
/// Returns &[`UNICODE_STRING`].
pub fn as_raw(&self) -> &UNICODE_STRING { &self.us }
/// Returns &mut [`UNICODE_STRING`].
pub unsafe fn as_mut_raw(&mut self) -> &mut UNICODE_STRING { &mut self.us }
/// Returns *const [`UNICODE_STRING`].
pub fn as_ptr(&self) -> *const UNICODE_STRING { &self.us as _ }
/// Returns *mut [`UNICODE_STRING`].
pub fn as_mut_ptr(&mut self) -> *mut UNICODE_STRING { &mut self.us as _ }
}
impl<'a> ops::Deref for UnicodeString<'a> {
type Target = WStr;
fn deref(&self) -> &Self::Target { self.s }
}
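// Usage sketch (assuming `ws: &WStr` is a NUL-terminated wide string obtained
// elsewhere, and `some_nt_api` is a hypothetical function taking a
// *const UNICODE_STRING):
//
// let us = UnicodeString::new(ws);
// some_nt_api(us.as_ptr());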
/// Represents [ANSI_STRING](https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-string).
#[repr(C)]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AnsiString<'a> {
us: ANSI_STRING,
s: &'a AStr,
}
impl<'a> AnsiString<'a> {
/// Creates AnsiString.
pub fn new(s: &'a AStr) -> Self {
let mut us = ANSI_STRING {
Length: 0,
MaximumLength: 0,
            Buffer: core::ptr::null_mut(),
};
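        // SAFETY: `RtlInitAnsiString` reads `s` as a NUL-terminated ANSI
        // string; `AStr` is assumed to uphold that invariant.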
unsafe {
RtlInitAnsiString(&mut us, s.as_ptr());
}
Self { us, s }
}
/// Returns &[`ANSI_STRING`].
pub fn as_raw(&self) -> &ANSI_STRING { &self.us }
/// Returns &mut [`ANSI_STRING`].
pub unsafe fn as_mut_raw(&mut self) -> &mut ANSI_STRING { &mut self.us }
/// Returns *const [`ANSI_STRING`].
pub fn as_ptr(&self) -> *const ANSI_STRING { &self.us as _ }
/// Returns *mut [`ANSI_STRING`].
pub fn as_mut_ptr(&mut self) -> *mut ANSI_STRING { &mut self.us as _ }
}
impl<'a> ops::Deref for AnsiString<'a> {
type Target = AStr;
fn deref(&self) -> &Self::Target { self.s }
}
| 28.129032 | 116 | 0.564985 |
fe0f830af08b111709a6afbeaf708a7d70e1603f | 37,098 | // Take a look at the license at the top of the repository in the LICENSE file.
// rustdoc-stripper-ignore-next
//! Traits intended for subclassing [`CellArea`](crate::CellArea).
use crate::subclass::prelude::*;
use crate::{
CellArea, CellAreaContext, CellRenderer, CellRendererState, DirectionType, SizeRequestMode,
Snapshot, TreeIter, TreeModel, Widget,
};
use glib::translate::*;
use glib::{Cast, IsA, ParamSpec, Value};
use std::mem;
#[derive(Debug)]
pub struct CellCallback {
callback: ffi::GtkCellCallback,
user_data: glib::ffi::gpointer,
}
impl CellCallback {
pub fn call<R: IsA<CellRenderer>>(&self, cell_renderer: &R) -> bool {
unsafe {
if let Some(callback) = self.callback {
from_glib(callback(
cell_renderer.as_ref().to_glib_none().0,
self.user_data,
))
} else {
// true to stop iterating over cells
true
}
}
}
}
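// Each renderer visited by the `foreach` vfunc is handed to `call`; returning
// `true` stops the iteration, mirroring the GtkCellCallback contract.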
#[derive(Debug)]
pub struct CellCallbackAllocate {
callback: ffi::GtkCellAllocCallback,
user_data: glib::ffi::gpointer,
}
impl CellCallbackAllocate {
pub fn call<R: IsA<CellRenderer>>(
&self,
cell_renderer: &R,
cell_area: &gdk::Rectangle,
cell_background: &gdk::Rectangle,
) -> bool {
unsafe {
if let Some(callback) = self.callback {
from_glib(callback(
cell_renderer.as_ref().to_glib_none().0,
cell_area.to_glib_none().0,
cell_background.to_glib_none().0,
self.user_data,
))
} else {
// true to stop iterating over cells
true
}
}
}
}
pub trait CellAreaImpl: CellAreaImplExt + ObjectImpl {
fn cell_properties() -> &'static [ParamSpec] {
&[]
}
fn set_cell_property<R: IsA<CellRenderer>>(
&self,
_obj: &Self::Type,
_renderer: &R,
_id: usize,
_value: &Value,
_pspec: &ParamSpec,
) {
unimplemented!()
}
fn cell_property<R: IsA<CellRenderer>>(
&self,
_obj: &Self::Type,
_renderer: &R,
_id: usize,
_pspec: &ParamSpec,
) -> Value {
unimplemented!()
}
fn activate<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
area: &gdk::Rectangle,
flags: CellRendererState,
edit_only: bool,
) -> bool {
self.parent_activate(cell_area, context, widget, area, flags, edit_only)
}
fn add<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R) {
self.parent_add(cell_area, renderer)
}
fn apply_attributes<M: IsA<TreeModel>>(
&self,
cell_area: &Self::Type,
tree_model: &M,
iter: &TreeIter,
is_expander: bool,
is_expanded: bool,
) {
self.parent_apply_attributes(cell_area, tree_model, iter, is_expander, is_expanded)
}
fn create_context(&self, cell_area: &Self::Type) -> Option<CellAreaContext> {
self.parent_create_context(cell_area)
}
fn copy_context<P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
) -> Option<CellAreaContext> {
self.parent_copy_context(cell_area, context)
}
fn event<W: IsA<Widget>, P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
event: &gdk::Event,
area: &gdk::Rectangle,
flags: CellRendererState,
) -> bool {
self.parent_event(cell_area, context, widget, event, area, flags)
}
fn foreach(&self, cell_area: &Self::Type, callback: &CellCallback) {
self.parent_foreach(cell_area, callback);
}
    fn foreach_alloc<P: IsA<CellAreaContext>, W: IsA<Widget>>(
        &self,
        cell_area: &Self::Type,
        context: &P,
        widget: &W,
        area: &gdk::Rectangle,
        bg_area: &gdk::Rectangle,
        callback: &CellCallbackAllocate,
    ) {
        self.parent_foreach_alloc(cell_area, context, widget, area, bg_area, callback)
    }
fn remove<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R) {
self.parent_remove(cell_area, renderer)
}
fn is_activatable(&self, cell_area: &Self::Type) -> bool {
self.parent_is_activatable(cell_area)
}
fn focus(&self, cell_area: &Self::Type, direction_type: DirectionType) -> bool {
self.parent_focus(cell_area, direction_type)
}
fn request_mode(&self, cell_area: &Self::Type) -> SizeRequestMode {
self.parent_request_mode(cell_area)
}
fn preferred_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
) -> (i32, i32) {
self.parent_preferred_width(cell_area, context, widget)
}
fn preferred_width_for_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
height: i32,
) -> (i32, i32) {
self.parent_preferred_width_for_height(cell_area, context, widget, height)
}
fn preferred_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
) -> (i32, i32) {
self.parent_preferred_height(cell_area, context, widget)
}
fn preferred_height_for_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
width: i32,
) -> (i32, i32) {
self.parent_preferred_height_for_width(cell_area, context, widget, width)
}
fn snapshot<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
snapshot: &Snapshot,
widget: &W,
background_area: &gdk::Rectangle,
cellarea: &gdk::Rectangle,
flags: CellRendererState,
paint_focus: bool,
) {
self.parent_snapshot(
cell_area,
context,
snapshot,
widget,
background_area,
cellarea,
flags,
paint_focus,
);
}
}
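// Implementation sketch (not part of this module; `MyCellArea` and its `imp`
// module are hypothetical names): a minimal subclass overriding one hook,
// following the usual glib subclassing pattern.
//
// mod imp {
//     use gtk::subclass::prelude::*;
//
//     #[derive(Default)]
//     pub struct MyCellArea;
//
//     #[glib::object_subclass]
//     impl ObjectSubclass for MyCellArea {
//         const NAME: &'static str = "MyCellArea";
//         type Type = super::MyCellArea;
//         type ParentType = gtk::CellArea;
//     }
//
//     impl ObjectImpl for MyCellArea {}
//
//     impl CellAreaImpl for MyCellArea {
//         fn request_mode(&self, _cell_area: &Self::Type) -> gtk::SizeRequestMode {
//             gtk::SizeRequestMode::HeightForWidth
//         }
//     }
// }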
pub trait CellAreaImplExt: ObjectSubclass {
fn parent_activate<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
area: &gdk::Rectangle,
flags: CellRendererState,
edit_only: bool,
) -> bool;
fn parent_add<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R);
fn parent_apply_attributes<M: IsA<TreeModel>>(
&self,
cell_area: &Self::Type,
tree_model: &M,
iter: &TreeIter,
is_expander: bool,
is_expanded: bool,
);
fn parent_create_context(&self, cell_area: &Self::Type) -> Option<CellAreaContext>;
fn parent_copy_context<P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
) -> Option<CellAreaContext>;
fn parent_event<W: IsA<Widget>, P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
event: &gdk::Event,
area: &gdk::Rectangle,
flags: CellRendererState,
) -> bool;
fn parent_foreach(&self, cell_area: &Self::Type, callback: &CellCallback);
    fn parent_foreach_alloc<P: IsA<CellAreaContext>, W: IsA<Widget>>(
        &self,
        cell_area: &Self::Type,
        context: &P,
        widget: &W,
        area: &gdk::Rectangle,
        bg_area: &gdk::Rectangle,
        callback: &CellCallbackAllocate,
    );
fn parent_remove<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R);
fn parent_is_activatable(&self, cell_area: &Self::Type) -> bool;
fn parent_focus(&self, cell_area: &Self::Type, direction_type: DirectionType) -> bool;
fn parent_request_mode(&self, cell_area: &Self::Type) -> SizeRequestMode;
fn parent_preferred_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
) -> (i32, i32);
fn parent_preferred_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
) -> (i32, i32);
fn parent_preferred_width_for_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
height: i32,
) -> (i32, i32);
fn parent_preferred_height_for_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
width: i32,
) -> (i32, i32);
fn parent_snapshot<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
snapshot: &Snapshot,
widget: &W,
background_area: &gdk::Rectangle,
cellarea: &gdk::Rectangle,
flags: CellRendererState,
paint_focus: bool,
);
}
impl<T: CellAreaImpl> CellAreaImplExt for T {
fn parent_activate<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
area: &gdk::Rectangle,
flags: CellRendererState,
edit_only: bool,
) -> bool {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).activate {
from_glib(f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
area.to_glib_none().0,
flags.into_glib(),
edit_only.into_glib(),
))
} else {
false
}
}
}
fn parent_add<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).add {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
renderer.as_ref().to_glib_none().0,
)
}
}
}
fn parent_apply_attributes<M: IsA<TreeModel>>(
&self,
cell_area: &Self::Type,
tree_model: &M,
iter: &TreeIter,
is_expander: bool,
is_expanded: bool,
) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).apply_attributes {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
tree_model.as_ref().to_glib_none().0,
iter.to_glib_none().0 as *mut _,
is_expander.into_glib(),
is_expanded.into_glib(),
)
}
}
}
fn parent_create_context(&self, cell_area: &Self::Type) -> Option<CellAreaContext> {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class)
.create_context
.expect("No parent class impl for \"create_context\"");
let ret = f(cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0);
Some(from_glib_full(ret))
}
}
fn parent_copy_context<P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
) -> Option<CellAreaContext> {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class)
.copy_context
.expect("No parent class impl for \"copy_context\"");
let ret = f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
context.as_ref().to_glib_none().0,
);
Some(from_glib_full(ret))
}
}
fn parent_event<W: IsA<Widget>, P: IsA<CellAreaContext>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
event: &gdk::Event,
area: &gdk::Rectangle,
flags: CellRendererState,
) -> bool {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).event {
from_glib(f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
event.to_glib_none().0,
area.to_glib_none().0,
flags.into_glib(),
))
} else {
// returns true only if the event is handled
false
}
}
}
fn parent_foreach(&self, cell_area: &Self::Type, callback: &CellCallback) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).foreach {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
callback.callback,
callback.user_data,
)
}
}
}
fn parent_foreach_alloc<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
widget: &W,
area: &gdk::Rectangle,
bg_area: &gdk::Rectangle,
callback: &CellCallbackAllocate,
) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).foreach_alloc {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
area.to_glib_none().0,
bg_area.to_glib_none().0,
callback.callback,
callback.user_data,
)
}
}
}
fn parent_remove<R: IsA<CellRenderer>>(&self, cell_area: &Self::Type, renderer: &R) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).remove {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
renderer.as_ref().to_glib_none().0,
)
}
}
}
fn parent_is_activatable(&self, cell_area: &Self::Type) -> bool {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).is_activatable {
from_glib(f(cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0))
} else {
false
}
}
}
fn parent_focus(&self, cell_area: &Self::Type, direction_type: DirectionType) -> bool {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).focus {
from_glib(f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
direction_type.into_glib(),
))
} else {
false
}
}
}
fn parent_request_mode(&self, cell_area: &Self::Type) -> SizeRequestMode {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class)
.get_request_mode
.expect("No parent class impl for \"get_request_mode\"");
from_glib(f(cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0))
}
}
fn parent_preferred_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
cell_area_context: &P,
widget: &W,
) -> (i32, i32) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class).get_preferred_width.unwrap();
let mut minimum_size = mem::MaybeUninit::uninit();
let mut natural_size = mem::MaybeUninit::uninit();
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
cell_area_context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
minimum_size.as_mut_ptr(),
natural_size.as_mut_ptr(),
);
(minimum_size.assume_init(), natural_size.assume_init())
}
}
fn parent_preferred_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
cell_area_context: &P,
widget: &W,
) -> (i32, i32) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class).get_preferred_height.unwrap();
let mut minimum_size = mem::MaybeUninit::uninit();
let mut natural_size = mem::MaybeUninit::uninit();
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
cell_area_context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
minimum_size.as_mut_ptr(),
natural_size.as_mut_ptr(),
);
(minimum_size.assume_init(), natural_size.assume_init())
}
}
fn parent_preferred_width_for_height<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
cell_area_context: &P,
widget: &W,
height: i32,
) -> (i32, i32) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class).get_preferred_width_for_height.unwrap();
let mut minimum_size = mem::MaybeUninit::uninit();
let mut natural_size = mem::MaybeUninit::uninit();
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
cell_area_context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
height,
minimum_size.as_mut_ptr(),
natural_size.as_mut_ptr(),
);
(minimum_size.assume_init(), natural_size.assume_init())
}
}
fn parent_preferred_height_for_width<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
cell_area_context: &P,
widget: &W,
width: i32,
) -> (i32, i32) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
let f = (*parent_class).get_preferred_height_for_width.unwrap();
let mut minimum_size = mem::MaybeUninit::uninit();
let mut natural_size = mem::MaybeUninit::uninit();
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
cell_area_context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
width,
minimum_size.as_mut_ptr(),
natural_size.as_mut_ptr(),
);
(minimum_size.assume_init(), natural_size.assume_init())
}
}
fn parent_snapshot<P: IsA<CellAreaContext>, W: IsA<Widget>>(
&self,
cell_area: &Self::Type,
context: &P,
snapshot: &Snapshot,
widget: &W,
background_area: &gdk::Rectangle,
cellarea: &gdk::Rectangle,
flags: CellRendererState,
paint_focus: bool,
) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkCellAreaClass;
if let Some(f) = (*parent_class).snapshot {
f(
cell_area.unsafe_cast_ref::<CellArea>().to_glib_none().0,
context.as_ref().to_glib_none().0,
widget.as_ref().to_glib_none().0,
snapshot.to_glib_none().0,
background_area.to_glib_none().0,
cellarea.to_glib_none().0,
flags.into_glib(),
paint_focus.into_glib(),
)
}
}
}
}
unsafe impl<T: CellAreaImpl> IsSubclassable<T> for CellArea {
fn class_init(class: &mut glib::Class<Self>) {
Self::parent_class_init::<T>(class);
let klass = class.as_mut();
assert!(
crate::rt::is_initialized(),
"GTK has to be initialized first"
);
let pspecs = <T as CellAreaImpl>::cell_properties();
if !pspecs.is_empty() {
unsafe {
for (prop_id, pspec) in pspecs.iter().enumerate() {
ffi::gtk_cell_area_class_install_cell_property(
klass,
prop_id as u32,
pspec.to_glib_none().0,
);
}
}
}
klass.activate = Some(cell_area_activate::<T>);
klass.add = Some(cell_area_add::<T>);
klass.apply_attributes = Some(cell_area_apply_attributes::<T>);
klass.create_context = Some(cell_area_create_context::<T>);
klass.copy_context = Some(cell_area_copy_context::<T>);
klass.event = Some(cell_area_event::<T>);
klass.foreach = Some(cell_area_foreach::<T>);
klass.foreach_alloc = Some(cell_area_foreach_alloc::<T>);
klass.remove = Some(cell_area_remove::<T>);
klass.is_activatable = Some(cell_area_is_activatable::<T>);
klass.focus = Some(cell_area_focus::<T>);
klass.get_request_mode = Some(cell_area_get_request_mode::<T>);
klass.get_preferred_width = Some(cell_area_get_preferred_width::<T>);
klass.get_preferred_width_for_height = Some(cell_area_get_preferred_width_for_height::<T>);
klass.get_preferred_height = Some(cell_area_get_preferred_height::<T>);
klass.get_preferred_height_for_width = Some(cell_area_get_preferred_height_for_width::<T>);
klass.snapshot = Some(cell_area_snapshot::<T>);
klass.set_cell_property = Some(cell_area_set_cell_property::<T>);
klass.get_cell_property = Some(cell_area_get_cell_property::<T>);
}
}
unsafe extern "C" fn cell_area_set_cell_property<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
rendererptr: *mut ffi::GtkCellRenderer,
id: u32,
valueptr: *mut glib::gobject_ffi::GValue,
pspecptr: *mut glib::gobject_ffi::GParamSpec,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
imp.set_cell_property(
from_glib_borrow::<_, CellArea>(ptr).unsafe_cast_ref(),
&*from_glib_borrow::<_, CellRenderer>(rendererptr),
id as usize,
&*(valueptr as *mut Value),
&from_glib_borrow(pspecptr),
);
}
unsafe extern "C" fn cell_area_get_cell_property<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
rendererptr: *mut ffi::GtkCellRenderer,
id: u32,
valueptr: *mut glib::gobject_ffi::GValue,
pspecptr: *mut glib::gobject_ffi::GParamSpec,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let value = imp.cell_property(
from_glib_borrow::<_, CellArea>(ptr).unsafe_cast_ref(),
&*from_glib_borrow::<_, CellRenderer>(rendererptr),
id as usize,
&from_glib_borrow(pspecptr),
);
    // See glib::subclass::ObjectImpl::property for the reasoning behind this.
glib::gobject_ffi::g_value_unset(valueptr);
let value = mem::ManuallyDrop::new(value);
std::ptr::write(valueptr, std::ptr::read(value.to_glib_none().0));
}
unsafe extern "C" fn cell_area_add<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
rendererptr: *mut ffi::GtkCellRenderer,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let renderer: Borrowed<CellRenderer> = from_glib_borrow(rendererptr);
imp.add(wrap.unsafe_cast_ref(), &*renderer)
}
unsafe extern "C" fn cell_area_apply_attributes<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
modelptr: *mut ffi::GtkTreeModel,
iterptr: *mut ffi::GtkTreeIter,
is_expander: glib::ffi::gboolean,
is_expanded: glib::ffi::gboolean,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let model: Borrowed<TreeModel> = from_glib_borrow(modelptr);
let iter: Borrowed<TreeIter> = from_glib_borrow(iterptr);
imp.apply_attributes(
wrap.unsafe_cast_ref(),
&*model,
&iter,
from_glib(is_expander),
from_glib(is_expanded),
)
}
unsafe extern "C" fn cell_area_remove<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
rendererptr: *mut ffi::GtkCellRenderer,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let renderer: Borrowed<CellRenderer> = from_glib_borrow(rendererptr);
imp.remove(wrap.unsafe_cast_ref(), &*renderer)
}
unsafe extern "C" fn cell_area_is_activatable<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
) -> glib::ffi::gboolean {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
imp.is_activatable(wrap.unsafe_cast_ref()).into_glib()
}
unsafe extern "C" fn cell_area_focus<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
directionptr: ffi::GtkDirectionType,
) -> glib::ffi::gboolean {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
imp.focus(wrap.unsafe_cast_ref(), from_glib(directionptr))
.into_glib()
}
unsafe extern "C" fn cell_area_get_request_mode<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
) -> ffi::GtkSizeRequestMode {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
imp.request_mode(wrap.unsafe_cast_ref()).into_glib()
}
unsafe extern "C" fn cell_area_get_preferred_height<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
minptr: *mut libc::c_int,
natptr: *mut libc::c_int,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
let (min_size, nat_size) = imp.preferred_height(wrap.unsafe_cast_ref(), &*context, &*widget);
if !minptr.is_null() {
*minptr = min_size;
}
if !natptr.is_null() {
*natptr = nat_size;
}
}
unsafe extern "C" fn cell_area_get_preferred_width<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
minptr: *mut libc::c_int,
natptr: *mut libc::c_int,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
let (min_size, nat_size) = imp.preferred_width(wrap.unsafe_cast_ref(), &*context, &*widget);
if !minptr.is_null() {
*minptr = min_size;
}
if !natptr.is_null() {
*natptr = nat_size;
}
}
unsafe extern "C" fn cell_area_get_preferred_width_for_height<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
height: i32,
min_width_ptr: *mut libc::c_int,
nat_width_ptr: *mut libc::c_int,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
let (min_width, nat_width) =
imp.preferred_width_for_height(wrap.unsafe_cast_ref(), &*context, &*widget, height);
if !min_width_ptr.is_null() {
*min_width_ptr = min_width;
}
if !nat_width_ptr.is_null() {
*nat_width_ptr = nat_width;
}
}
unsafe extern "C" fn cell_area_get_preferred_height_for_width<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
width: i32,
min_height_ptr: *mut libc::c_int,
nat_height_ptr: *mut libc::c_int,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
let (min_height, nat_height) =
imp.preferred_height_for_width(wrap.unsafe_cast_ref(), &*context, &*widget, width);
if !min_height_ptr.is_null() {
*min_height_ptr = min_height;
}
if !nat_height_ptr.is_null() {
*nat_height_ptr = nat_height;
}
}
unsafe extern "C" fn cell_area_activate<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
cellptr: *const gdk::ffi::GdkRectangle,
flags: ffi::GtkCellRendererState,
edit_only: glib::ffi::gboolean,
) -> glib::ffi::gboolean {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
imp.activate(
wrap.unsafe_cast_ref(),
&*context,
&*widget,
&from_glib_borrow(cellptr),
from_glib(flags),
from_glib(edit_only),
)
.into_glib()
}
unsafe extern "C" fn cell_area_snapshot<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
wdgtptr: *mut ffi::GtkWidget,
snapshotptr: *mut ffi::GtkSnapshot,
bgptr: *const gdk::ffi::GdkRectangle,
cellptr: *const gdk::ffi::GdkRectangle,
flags: ffi::GtkCellRendererState,
paint_focus: glib::ffi::gboolean,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(wdgtptr);
let snapshot: Borrowed<Snapshot> = from_glib_borrow(snapshotptr);
imp.snapshot(
wrap.unsafe_cast_ref(),
&*context,
&snapshot,
&*widget,
&from_glib_borrow(bgptr),
&from_glib_borrow(cellptr),
from_glib(flags),
from_glib(paint_focus),
)
}
unsafe extern "C" fn cell_area_create_context<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
) -> *mut ffi::GtkCellAreaContext {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
imp.create_context(wrap.unsafe_cast_ref()).to_glib_full()
}
unsafe extern "C" fn cell_area_copy_context<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
) -> *mut ffi::GtkCellAreaContext {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
imp.copy_context(wrap.unsafe_cast_ref(), &*context)
.to_glib_full()
}
unsafe extern "C" fn cell_area_event<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
widgetptr: *mut ffi::GtkWidget,
eventptr: *mut gdk::ffi::GdkEvent,
rectangleptr: *const gdk::ffi::GdkRectangle,
flags: ffi::GtkCellRendererState,
) -> glib::ffi::gboolean {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(widgetptr);
let event: Borrowed<gdk::Event> = from_glib_borrow(eventptr);
let rectangle: Borrowed<gdk::Rectangle> = from_glib_borrow(rectangleptr);
imp.event(
wrap.unsafe_cast_ref(),
&*context,
&*widget,
&event,
&rectangle,
from_glib(flags),
)
.into_glib()
}
unsafe extern "C" fn cell_area_foreach<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
callback: ffi::GtkCellCallback,
user_data: glib::ffi::gpointer,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let callback = CellCallback {
callback,
user_data,
};
imp.foreach(wrap.unsafe_cast_ref(), &callback)
}
unsafe extern "C" fn cell_area_foreach_alloc<T: CellAreaImpl>(
ptr: *mut ffi::GtkCellArea,
contextptr: *mut ffi::GtkCellAreaContext,
widgetptr: *mut ffi::GtkWidget,
areaptr: *const gdk::ffi::GdkRectangle,
rectangleptr: *const gdk::ffi::GdkRectangle,
callback: ffi::GtkCellAllocCallback,
user_data: glib::ffi::gpointer,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<CellArea> = from_glib_borrow(ptr);
let context: Borrowed<CellAreaContext> = from_glib_borrow(contextptr);
let widget: Borrowed<Widget> = from_glib_borrow(widgetptr);
let rectangle: Borrowed<gdk::Rectangle> = from_glib_borrow(rectangleptr);
let area: Borrowed<gdk::Rectangle> = from_glib_borrow(areaptr);
let callback = CellCallbackAllocate {
callback,
user_data,
};
imp.foreach_alloc(
wrap.unsafe_cast_ref(),
&*context,
&*widget,
&area,
&rectangle,
&callback,
)
}
#[allow(clippy::missing_safety_doc)]
pub unsafe trait CellAreaClassSubclassExt: ClassStruct {
#[doc(alias = "gtk_cell_area_class_find_cell_property")]
fn find_cell_property(&self, property_name: &str) -> Option<ParamSpec> {
unsafe {
let cell_area_class = self as *const _ as *mut ffi::GtkCellAreaClass;
from_glib_none(ffi::gtk_cell_area_class_find_cell_property(
cell_area_class,
property_name.to_glib_none().0,
))
}
}
#[doc(alias = "gtk_cell_area_class_list_cell_properties")]
fn list_cell_properties(&self) -> Vec<ParamSpec> {
unsafe {
let cell_area_class = self as *const _ as *mut ffi::GtkCellAreaClass;
let mut n_properties = std::mem::MaybeUninit::uninit();
let props = ffi::gtk_cell_area_class_list_cell_properties(
cell_area_class,
n_properties.as_mut_ptr(),
);
FromGlibContainer::from_glib_none_num(props, n_properties.assume_init() as usize)
}
}
}
unsafe impl<T: ClassStruct> CellAreaClassSubclassExt for T where T::Type: CellAreaImpl {}
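// Usage sketch from a subclass `class_init` (hypothetical `klass` value of a
// custom class struct): inspecting cell properties through the extension trait.
//
// let pspecs = klass.list_cell_properties();
// let align = klass.find_cell_property("align");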
| 33.123214 | 99 | 0.58704 |
bbfc12d89d71e8776b96784f9954fba4ee81d41d | 3,261 | //TODO: ADD DOCUMENTATION
use std::io;
use std::io::Write;
use std::str::FromStr;
const OPERATORS: &str = "Available Operators:\n\
+ Addition\n\
- Subtraction\n\
* Multiplication\n\
/ Division\n";
struct Numbers {
x: f64,
y: f64,
}
impl Numbers {
fn new(x: f64, y: f64) -> Self {
Numbers { x, y }
}
fn add(&self) -> f64 {
self.x + self.y
}
fn subtract(&self) -> f64 {
self.x - self.y
}
fn multiply(&self) -> f64 {
self.x * self.y
}
fn divide(&self) -> f64 {
self.x / self.y
}
fn display_math(&self, symbol: char, result: f64) {
println!("{} {} {} = {}", self.x, symbol, self.y, result)
}
fn do_math(&self, symbol: char) {
match symbol {
'+' => {
self.display_math(symbol, self.add());
}
'-' => {
self.display_math(symbol, self.subtract());
}
'*' => {
self.display_math(symbol, self.multiply());
}
'/' => {
self.display_math(symbol, self.divide());
}
            // Only reachable from the CLI path; interactive input is validated first.
            _ => eprintln!("Unknown operator.\n{}", OPERATORS),
}
}
}
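// Usage sketch (hypothetical values):
//
// let numbers = Numbers::new(6.0, 3.0);
// numbers.do_math('/'); // prints "6 / 3 = 2"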
fn input<T: FromStr>() -> T {
let mut output = String::new();
io::stdin().read_line(&mut output).unwrap();
auto_parse(output)
}
fn auto_parse<T: FromStr>(input: String) -> T {
let output: T = input.trim().parse().unwrap_or_else(|_| {
eprintln!("Can you input whats asked? Thanks.");
panic!();
});
output
}
fn main() {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 1 {
print!(
"Welcome to ZigTag's calculator\n\
Please enter a symbol: "
);
io::stdout().flush().unwrap();
let mut symbol: char;
loop {
symbol = input();
if !((symbol == '+') || (symbol == '-') || (symbol == '*') || (symbol == '/')) {
print!("{}", OPERATORS);
} else {
break;
}
print!("\nPlease enter a symbol: ");
io::stdout().flush().unwrap();
}
print!("Now enter your first number: ");
io::stdout().flush().unwrap();
let first_number: f64 = input();
print!("Now enter your second number: ");
io::stdout().flush().unwrap();
let second_number: f64 = input();
let numbers = Numbers::new(first_number, second_number);
numbers.do_math(symbol);
} else if args.len() == 2 {
if args[1] == "-h" || args[1] == "--help" {
println!(
"ZigTag's Calculator\n\
Usage: calculator <first_number> <second_number> <symbol>\n\
{}",
OPERATORS
)
        } else {
            println!("Unknown argument. Try -h or --help.");
        }
} else if args.len() <= 3 {
println!("You need 3 arguments.");
} else if args.len() == 4 {
let first_number: f64 = auto_parse(args[1].clone());
let second_number: f64 = auto_parse(args[2].clone());
let symbol: char = auto_parse(args[3].clone());
let numbers = Numbers::new(first_number, second_number);
numbers.do_math(symbol);
} else {
println!("Too many arguments.")
}
}
| 25.476563 | 92 | 0.472861 |
fe32e2ab7139630bca539e9756ad32fde0b322a3 | 16152 | //! Ballot Leader Election algorithm for electing new leaders
use crate::{
util::defaults::{BLE_BUFFER_SIZE as DEFAULT_BUFFER_SIZE, *},
utils::{
hocon_kv::{BLE_BUFFER_SIZE, HB_DELAY, INITIAL_DELAY, LOG_FILE_PATH, PEERS, PID, PRIORITY},
logger::create_logger,
},
};
use hocon::Hocon;
use messages::{BLEMessage, HeartbeatMsg, HeartbeatReply, HeartbeatRequest};
use slog::{debug, info, trace, warn, Logger};
use serde::{Serialize, Deserialize};
/// Used to define an epoch
#[derive(Clone, Copy, Eq, Debug, Default, Ord, PartialOrd, PartialEq, Serialize, Deserialize)]
pub struct Ballot {
/// Ballot number
pub n: u32,
/// Custom priority parameter
pub priority: u64,
/// The pid of the process
pub pid: u64,
}
impl Ballot {
/// Creates a new Ballot
/// # Arguments
/// * `n` - Ballot number.
/// * `priority` - Custom priority parameter.
/// * `pid` - Used as tiebreaker for total ordering of ballots.
pub fn with(n: u32, priority: u64, pid: u64) -> Ballot {
Ballot { n, priority, pid }
}
}
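// Ordering sketch: the derived `Ord` compares (n, priority, pid) lexicographically,
// so e.g. Ballot::with(2, 0, 1) > Ballot::with(1, 9, 9), and within the same round
// Ballot::with(1, 1, 1) > Ballot::with(1, 0, 2).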
/// A Ballot Leader Election component. Used in conjunction with Omni-Paxos, it handles the election of a leader for a group of omni-paxos replicas,
/// processes incoming messages, and produces outgoing messages that the user has to fetch periodically and send using a network implementation.
/// The user also has to periodically fetch the decided entries, which are guaranteed to be strongly consistent and linearizable, and therefore safe to use in the higher-level application.
pub struct BallotLeaderElection {
/// Process identifier used to uniquely identify this instance.
pid: u64,
/// Vector that holds all the other replicas.
peers: Vec<u64>,
/// The current round of the heartbeat cycle.
hb_round: u32,
/// Vector which holds all the received ballots.
ballots: Vec<(Ballot, bool)>,
/// Holds the current ballot of this instance.
current_ballot: Ballot, // (round, pid)
/// States if the instance is a candidate to become a leader.
majority_connected: bool,
/// Current elected leader.
leader: Option<Ballot>,
/// Internal delay used for timeout.
hb_current_delay: u64,
    /// How long to wait before timing out on a Heartbeat response, possibly resulting in a leader change. Measured in number of times [`tick()`] is called.
hb_delay: u64,
    /// The majority quorum size of the cluster, i.e. floor(n/2) + 1 replicas.
majority: usize,
    /// An optional shorter delay used in the beginning to elect a leader faster when starting up.
    /// If set, it replaces hb_delay until the first leader is elected.
initial_delay: Option<u64>,
/// Internal timer which simulates the passage of time.
ticks_elapsed: u64,
/// Vector which holds all the outgoing messages of the BLE instance.
outgoing: Vec<BLEMessage>,
/// Logger used to output the status of the component.
logger: Logger,
}
impl BallotLeaderElection {
/// Construct a new BallotLeaderElection node
pub fn with(config: BLEConfig) -> Self {
let pid = config.pid;
let peers = config.peers;
        let n = peers.len() + 1;
let initial_ballot = match config.initial_leader {
Some(leader_ballot) if leader_ballot.pid == pid => leader_ballot,
_ => Ballot::with(0, config.priority.unwrap_or_default(), pid),
};
let path = config.logger_file_path;
let l = config.logger.unwrap_or_else(|| {
let s = path.unwrap_or_else(|| format!("logs/paxos_{}.log", pid));
create_logger(s.as_str())
});
let hb_delay = config.hb_delay;
info!(l, "Ballot Leader Election component pid: {} created!", pid);
BallotLeaderElection {
pid,
            majority: n / 2 + 1, // simple majority of n = peers + self
peers,
hb_round: 0,
ballots: Vec::with_capacity(n),
current_ballot: initial_ballot,
majority_connected: true,
leader: config.initial_leader,
hb_current_delay: hb_delay,
hb_delay,
initial_delay: config.initial_delay,
ticks_elapsed: 0,
outgoing: vec![],
logger: l,
}
}
/// Update the custom priority used in the Ballot for this server.
pub fn set_priority(&mut self, p: u64) {
self.current_ballot.priority = p;
}
/// Returns outgoing messages
pub fn get_outgoing_msgs(&mut self) -> Vec<BLEMessage> {
std::mem::take(&mut self.outgoing)
}
/// Returns the currently elected leader.
pub fn get_leader(&self) -> Option<Ballot> {
self.leader
}
/// Tick is run by all servers to simulate the passage of time
/// If one wishes to have hb_delay of 500ms, one can set a periodic timer of 100ms to call tick(). After 5 calls to this function, the timeout will occur.
/// Returns an Option with the elected leader otherwise None
pub fn tick(&mut self) -> Option<Ballot> {
self.ticks_elapsed += 1;
if self.ticks_elapsed >= self.hb_current_delay {
self.ticks_elapsed = 0;
self.hb_timeout()
} else {
None
}
}
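    // Driver sketch (the timer and `network` below are hypothetical, external
    // to this crate): with hb_delay = 5, fire a 100ms periodic timer and on
    // each fire run:
    //
    // if let Some(leader) = ble.tick() { /* hand the new leader to Omni-Paxos */ }
    // for msg in ble.get_outgoing_msgs() { network.send(msg.to, msg); }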
/// Handle an incoming message.
/// # Arguments
/// * `m` - the message to be handled.
pub fn handle(&mut self, m: BLEMessage) {
match m.msg {
HeartbeatMsg::Request(req) => self.handle_request(m.from, req),
HeartbeatMsg::Reply(rep) => self.handle_reply(rep),
}
}
/// Sets initial state after creation. *Must only be used before being started*.
/// # Arguments
/// * `leader_ballot` - Initial leader.
pub fn set_initial_leader(&mut self, leader_ballot: Ballot) {
assert!(self.leader.is_none());
if leader_ballot.pid == self.pid {
self.current_ballot = leader_ballot;
self.majority_connected = true;
}
self.leader = Some(leader_ballot);
}
fn check_leader(&mut self) -> Option<Ballot> {
self.majority_connected = true;
let ballots = std::mem::take(&mut self.ballots);
let top_ballot = ballots
.into_iter()
.filter_map(
|(ballot, candidate)| {
if candidate {
Some(ballot)
} else {
None
}
},
)
.max()
.unwrap_or_default();
if top_ballot < self.leader.unwrap_or_default() {
// did not get HB from leader
self.current_ballot.n = self.leader.unwrap_or_default().n + 1;
self.leader = None;
None
} else if self.leader != Some(top_ballot) {
// got a new leader with greater ballot
self.leader = Some(top_ballot);
self.initial_delay = None;
debug!(
self.logger,
"BLE {}, New Leader elected: {:?}", self.pid, top_ballot
);
Some(top_ballot)
} else {
None
}
}
/// Initiates a new heartbeat round.
pub fn new_hb_round(&mut self) {
self.hb_round += 1;
trace!(
self.logger,
"Initiate new heartbeat round: {}",
self.hb_round
);
self.hb_current_delay = self.initial_delay.unwrap_or(self.hb_delay);
for peer in &self.peers {
let hb_request = HeartbeatRequest::with(self.hb_round);
self.outgoing.push(BLEMessage::with(
*peer,
self.pid,
HeartbeatMsg::Request(hb_request),
));
}
}
fn hb_timeout(&mut self) -> Option<Ballot> {
trace!(self.logger, "Heartbeat timeout round: {}", self.hb_round);
let result: Option<Ballot> = if self.ballots.len() + 1 >= self.majority {
debug!(
self.logger,
"Received a majority of heartbeats {:?}", self.ballots
);
self.ballots
.push((self.current_ballot, self.majority_connected));
self.check_leader()
} else {
warn!(
self.logger,
"Did not receive a majority of heartbeats {:?}", self.ballots
);
self.ballots.clear();
self.majority_connected = false;
None
};
self.new_hb_round();
result
}
fn handle_request(&mut self, from: u64, req: HeartbeatRequest) {
trace!(self.logger, "Heartbeat request from {}", from);
let hb_reply =
HeartbeatReply::with(req.round, self.current_ballot, self.majority_connected);
self.outgoing.push(BLEMessage::with(
self.pid,
from,
HeartbeatMsg::Reply(hb_reply),
));
}
fn handle_reply(&mut self, rep: HeartbeatReply) {
if rep.round == self.hb_round {
self.ballots.push((rep.ballot, rep.majority_connected));
} else {
warn!(
self.logger,
"Got late response, round {}, current delay {}, ballot {:?}",
self.hb_round,
self.hb_current_delay,
rep.ballot
);
}
}
}
/// The different messages BLE uses to communicate with other replicas.
pub mod messages {
use crate::ballot_leader_election::Ballot;
use serde::{Serialize, Deserialize};
/// An enum for all the different BLE message types.
#[allow(missing_docs)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HeartbeatMsg {
Request(HeartbeatRequest),
Reply(HeartbeatReply),
}
/// Requests a reply from all the other replicas.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct HeartbeatRequest {
/// Number of the current round.
pub round: u32,
}
impl HeartbeatRequest {
/// Creates a new HeartbeatRequest
/// # Arguments
/// * `round` - number of the current round.
pub fn with(round: u32) -> HeartbeatRequest {
HeartbeatRequest { round }
}
}
    /// Reply to a heartbeat request, carrying the sender's current ballot.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct HeartbeatReply {
/// Number of the current round.
pub round: u32,
/// Ballot of a replica.
pub ballot: Ballot,
/// States if the replica is a candidate to become a leader.
pub majority_connected: bool,
}
impl HeartbeatReply {
        /// Creates a new HeartbeatReply
/// # Arguments
/// * `round` - Number of the current round.
/// * `ballot` - Ballot of a replica.
        /// * `majority_connected` - States if the replica is connected to a majority and thus a candidate to become the leader.
pub fn with(round: u32, ballot: Ballot, majority_connected: bool) -> HeartbeatReply {
HeartbeatReply {
round,
ballot,
majority_connected,
}
}
}
    /// A struct for a BLE message that also includes sender and receiver.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BLEMessage {
/// Sender of `msg`.
pub from: u64,
/// Receiver of `msg`.
pub to: u64,
/// The message content.
pub msg: HeartbeatMsg,
}
impl BLEMessage {
/// Creates a BLE message.
/// # Arguments
/// * `from` - Sender of `msg`.
/// * `to` - Receiver of `msg`.
/// * `msg` - The message content.
pub fn with(from: u64, to: u64, msg: HeartbeatMsg) -> Self {
BLEMessage { from, to, msg }
}
}
}
/// Configuration for `BallotLeaderElection`.
/// # Fields
/// * `pid`: The unique identifier of this node. Must not be 0.
/// * `peers`: The peers of this node i.e. the `pid`s of the other replicas in the configuration.
/// * `priority`: Set custom priority for this node to be elected as the leader.
/// * `hb_delay`: Timeout for waiting on heartbeat messages. It is measured in number of ticks.
/// * `initial_leader`: The initial leader of the cluster.
/// * `initial_delay`: Optional initial timeout that can be used to elect a leader faster initially.
/// * `logger`: Custom logger for logging events of Ballot Leader Election.
/// * `logger_file_path`: The path where the default logger logs events.
/// * `buffer_size`: The buffer size for outgoing messages.
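///
/// A minimal construction sketch using the setters below (pid 1 with peers 2
/// and 3 are placeholder values):
/// ```ignore
/// let mut conf = BLEConfig::default();
/// conf.set_pid(1);
/// conf.set_peers(vec![2, 3]);
/// conf.set_hb_delay(40); // measured in ticks
/// ```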
#[derive(Clone, Debug)]
pub struct BLEConfig {
pid: u64,
peers: Vec<u64>,
priority: Option<u64>,
hb_delay: u64,
initial_leader: Option<Ballot>,
initial_delay: Option<u64>,
logger: Option<Logger>,
logger_file_path: Option<String>,
buffer_size: usize,
}
#[allow(missing_docs)]
impl BLEConfig {
pub fn set_pid(&mut self, pid: u64) {
self.pid = pid;
}
pub fn get_pid(&self) -> u64 {
self.pid
}
pub fn set_peers(&mut self, peers: Vec<u64>) {
self.peers = peers;
}
pub fn get_peers(&self) -> &[u64] {
self.peers.as_slice()
}
pub fn set_priority(&mut self, priority: u64) {
self.priority = Some(priority);
}
pub fn get_priority(&self) -> Option<u64> {
self.priority
}
pub fn set_hb_delay(&mut self, hb_delay: u64) {
self.hb_delay = hb_delay;
}
pub fn get_hb_delay(&self) -> u64 {
self.hb_delay
}
pub fn set_initial_leader(&mut self, b: Ballot) {
self.initial_leader = Some(b);
}
pub fn get_initial_leader(&self) -> Option<Ballot> {
self.initial_leader
}
pub fn set_initial_delay(&mut self, initial_delay: u64) {
self.initial_delay = Some(initial_delay);
}
pub fn get_initial_delay(&self) -> Option<u64> {
self.initial_delay
}
pub fn set_logger(&mut self, l: Logger) {
self.logger = Some(l);
}
pub fn get_logger(&self) -> Option<&Logger> {
self.logger.as_ref()
}
pub fn set_logger_file_path(&mut self, s: String) {
self.logger_file_path = Some(s);
}
pub fn get_logger_file_path(&self) -> Option<&String> {
self.logger_file_path.as_ref()
}
pub fn set_buffer_size(&mut self, size: usize) {
self.buffer_size = size;
}
pub fn get_buffer_size(&self) -> usize {
self.buffer_size
}
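    /// Builds a config from a loaded Hocon object.
    ///
    /// A usage sketch; the exact key names (`pid`, `peers`, `hb_delay`, ...)
    /// are defined by the crate's key constants, and `HoconLoader` is assumed
    /// to come from the `hocon` crate:
    /// ```ignore
    /// let cfg = HoconLoader::new()
    ///     .load_str(r#"{ pid: 1, peers: [2, 3], hb_delay: 40 }"#)?
    ///     .hocon()?;
    /// let ble_conf = BLEConfig::with_hocon(&cfg);
    /// ```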
pub fn with_hocon(h: &Hocon) -> Self {
let mut config = Self::default();
config.set_pid(h[PID].as_i64().expect("Failed to load PID") as u64);
match &h[PEERS] {
Hocon::Array(v) => {
let peers = v
.iter()
.map(|x| x.as_i64().expect("Failed to load pid in Hocon array") as u64)
.collect();
config.set_peers(peers);
}
_ => {
unimplemented!("Peers in Hocon should be parsed as array!")
}
}
if let Some(p) = h[LOG_FILE_PATH].as_string() {
config.set_logger_file_path(p);
}
if let Some(p) = h[PRIORITY].as_i64().map(|p| p as u64) {
config.set_priority(p);
}
if let Some(d) = h[INITIAL_DELAY].as_i64().map(|i| i as u64) {
config.set_initial_delay(d);
}
if let Some(b) = h[BLE_BUFFER_SIZE].as_i64() {
config.set_buffer_size(b as usize);
}
config.set_hb_delay(
h[HB_DELAY]
.as_i64()
.expect("Failed to load heartbeat delay") as u64,
);
config
}
}
impl Default for BLEConfig {
fn default() -> Self {
Self {
pid: 0,
peers: vec![],
priority: None,
hb_delay: HB_TIMEOUT,
initial_leader: None,
initial_delay: None,
logger: None,
logger_file_path: None,
buffer_size: DEFAULT_BUFFER_SIZE,
}
}
}
| 32.762677 | 191 | 0.578814 |
0e0ae290f1ce8a584917e13818ab421b84233481 | 343 | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct RefPtr {
pub _address: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct RefPtr_Proxy {
pub _address: u8,
}
| 17.15 | 45 | 0.673469 |
d984302696394a280af974a42fc783de5f05eae7 | 17,767 | // The driver based on the online manual http://www.lowlevel.eu/wiki/RTL8139
#![allow(dead_code)]
use core::mem;
use crate::arch::kernel::irq::*;
use crate::arch::kernel::pci;
use crate::arch::kernel::percore::increment_irq_counter;
use crate::arch::mm::paging::virt_to_phys;
use crate::arch::mm::VirtAddr;
use crate::drivers::error::DriverError;
use crate::drivers::net::{netwakeup, network_irqhandler, NetworkInterface};
use crate::x86::io::*;
/// size of the receive buffer
const RX_BUF_LEN: usize = 8192;
/// size of the send buffer
const TX_BUF_LEN: usize = 4096;
/// the ethernet ID (6bytes) => MAC address
const IDR0: u16 = 0x0;
/// transmit status of each descriptor (4bytes/descriptor) (C mode)
const TSD0: u16 = 0x10;
/// transmit start address of descriptor 0 (4byte, C mode, 4 byte alignment)
const TSAD0: u16 = 0x20;
/// transmit start address of descriptor 1 (4byte, C mode, 4 byte alignment)
const TSAD1: u16 = 0x24;
/// transmit normal priority descriptors start address (8bytes, C+ mode, 256 byte-align)
const TNPDS: u16 = 0x20;
/// transmit start address of descriptor 2 (4byte, C mode, 4 byte alignment)
const TSAD2: u16 = 0x28;
/// transmit start address of descriptor 3 (4byte, C mode, 4 byte alignment)
const TSAD3: u16 = 0x2c;
/// command register (1byte)
const CR: u16 = 0x37;
/// current address of packet read (2byte, C mode, initial value 0xFFF0)
const CAPR: u16 = 0x38;
/// interrupt mask register (2byte)
const IMR: u16 = 0x3c;
/// interrupt status register (2byte)
const ISR: u16 = 0x3e;
/// transmit config register (4byte)
const TCR: u16 = 0x40;
/// receive config register (4byte)
const RCR: u16 = 0x44;
// command register for 93C46 (93C56) (1byte)
const CR9346: u16 = 0x50;
/// config register 0 (1byte)
const CONFIG0: u16 = 0x51;
/// config register 1 (1byte)
const CONFIG1: u16 = 0x52;
/// media status register (1byte)
const MSR: u16 = 0x58;
/// receive buffer start address (C mode, 4 byte alignment)
const RBSTART: u16 = 0x30;
/// basic mode control register (2byte)
const BMCR: u16 = 0x62;
/// basic mode status register (2byte)
const BMSR: u16 = 0x64;
/// Reset, set to 1 to invoke S/W reset, held to 1 while resetting
const CR_RST: u8 = 0x10;
/// Receiver Enable, enables receiving
const CR_RE: u8 = 0x08;
/// Transmitter Enable, enables transmitting
const CR_TE: u8 = 0x04;
/// Rx buffer is empty
const CR_BUFE: u8 = 0x01;
// determine the operating mode
const CR9346_EEM1: u8 = 0x80;
/// 00 = Normal, 01 = Auto-load, 10 = Programming, 11 = Config, Register write enabled
const CR9346_EEM0: u8 = 0x40;
/// status of EESK
const CR9346_EESK: u8 = 0x4;
/// status of EEDI
const CR9346_EEDI: u8 = 0x2;
/// status of EEDO
const CR9346_EEDO: u8 = 0x1;
/// leds status
const CONFIG1_LEDS: u8 = 0xC0;
/// is the driver loaded ?
const CONFIG1_DVRLOAD: u8 = 0x20;
/// lanwake mode
const CONFIG1_LWACT: u8 = 0x10;
/// Memory mapping enabled ?
const CONFIG1_MEMMAP: u8 = 0x8;
/// IO map enabled ?
const CONFIG1_IOMAP: u8 = 0x4;
/// enable the virtual product data
const CONFIG1_VPD: u8 = 0x2;
/// Power Management Enable
const CONFIG1_PMEN: u8 = 0x1;
// Media Status Register
const MSR_TXFCE: u8 = 0x80; // Tx Flow Control enabled
const MSR_RXFCE: u8 = 0x40; // Rx Flow Control enabled
const MSR_AS: u8 = 0x10; // Auxiliary status
const MSR_SPEED: u8 = 0x8; // set if currently talking on 10mbps network, clear if 100mbps
const MSR_LINKB: u8 = 0x4; // Link Bad ?
const MSR_TXPF: u8 = 0x2; // Transmit Pause flag
const MSR_RXPF: u8 = 0x1; // Receive Pause flag
const RCR_ERTH3: u32 = 0x0800_0000; // early Rx Threshold 0
const RCR_ERTH2: u32 = 0x0400_0000; // early Rx Threshold 1
const RCR_ERTH1: u32 = 0x0200_0000; // early Rx Threshold 2
const RCR_ERTH0: u32 = 0x0100_0000; // early Rx Threshold 3
const RCR_MRINT: u32 = 0x20000; // Multiple Early interrupt, (enable to make interrupts happen early, yuk)
const RCR_RER8: u32 = 0x10000; // Receive Error Packets larger than 8 bytes
const RCR_RXFTH2: u32 = 0x8000; // Rx Fifo threshold 0
const RCR_RXFTH1: u32 = 0x4000; // Rx Fifo threshold 1 (set to 110 and it will send to system when 1024bytes have been gathered)
const RCR_RXFTH0: u32 = 0x2000; // Rx Fifo threshold 2 (set all these to 1, and it wont FIFO till the full packet is ready)
const RCR_RBLEN1: u32 = 0x1000; // Rx Buffer length 0
const RCR_RBLEN0: u32 = 0x800; // Rx Buffer length 1 (C mode, 11 = 64kb, 10 = 32k, 01 = 16k, 00 = 8k)
const RCR_MXDMA2: u32 = 0x400; // Max DMA burst size 0
const RCR_MXDMA1: u32 = 0x200; // Max DMA burst size 1
const RCR_MXDMA0: u32 = 0x100; // Max DMA burst size 2
const RCR_WRAP: u32 = 0x80; // (void if buffer size = 64k, C mode, wrap to beginning of Rx buffer if we hit the end)
const RCR_EEPROMSEL: u32 = 0x40; // EEPROM type (0 = 9346, 1 = 9356)
const RCR_AER: u32 = 0x20; // Accept Error Packets (do we accept bad packets ?)
const RCR_AR: u32 = 0x10; // Accept runt packets (accept packets that are too small ?)
const RCR_AB: u32 = 0x08; // Accept Broadcast packets (accept broadcasts ?)
const RCR_AM: u32 = 0x04; // Accept multicast ?
const RCR_APM: u32 = 0x02; // Accept Physical matches (accept packets sent to our mac ?)
const RCR_AAP: u32 = 0x01; // Accept packets with a physical address ?
const TCR_HWVERID: u32 = 0x7CC0_0000; // mask for hw version ID's
const TCR_HWOFFSET: u32 = 22;
const TCR_IFG: u32 = 0x0300_0000; // interframe gap time
const TCR_LBK1: u32 = 0x40000; // loopback test
const TCR_LBK0: u32 = 0x20000; // loopback test
const TCR_CRC: u32 = 0x10000; // append CRC (card adds CRC if 1)
const TCR_MXDMA2: u32 = 0x400; // max dma burst
const TCR_MXDMA1: u32 = 0x200; // max dma burst
const TCR_MXDMA0: u32 = 0x100; // max dma burst
const TCR_TXRR: u32 = 0xF0; // Tx retry count, 0 = 16 else retries TXRR * 16 + 16 times
const TCR_CLRABT: u32 = 0x01; // Clear abort, attempt retransmit (when in abort state)
// Basic mode control register
const BMCR_RESET: u16 = 0x8000; // set the status and control of PHY to default
const BMCR_SPD100: u16 = 1 << 13; // 100 MBit
const BMCR_SPD1000: u16 = 1 << 6; // 1000 MBit
const BMCR_ANE: u16 = 0x1000; // enable N-way autonegotiation (ignore above if set)
const BMCR_RAN: u16 = 0x400; // restart auto-negotiation
const BMCR_DUPLEX: u16 = 0x200; // Duplex mode, generally a value of 1 means full-duplex
// Interrupt Status/Mask Register
// Bits in IMR enable/disable interrupts for specific events
// Bits in ISR indicate the status of the card
const ISR_SERR: u16 = 0x8000; // System error interrupt
const ISR_TUN: u16 = 0x4000; // time out interrupt
const ISR_SWINT: u16 = 0x100; // Software interrupt
const ISR_TDU: u16 = 0x80; // Tx Descriptor unavailable
const ISR_FIFOOVW: u16 = 0x40; // Rx Fifo overflow
const ISR_PUN: u16 = 0x20; // Packet underrun/link change
const ISR_RXOVW: u16 = 0x10; // Rx overflow/Rx Descriptor unavailable
const ISR_TER: u16 = 0x08; // Tx Error
const ISR_TOK: u16 = 0x04; // Tx OK
const ISR_RER: u16 = 0x02; // Rx Error
const ISR_ROK: u16 = 0x01; // Rx OK
const R39_INTERRUPT_MASK: u16 = 0x7f;
// Transmit Status of Descriptor0-3 (C mode only)
const TSD_CRS: u32 = 1 << 31; // carrier sense lost (during packet transmission)
const TSD_TABT: u32 = 1 << 30; // transmission abort
const TSD_OWC: u32 = 1 << 29; // out of window collision
const TSD_CDH: u32 = 1 << 28; // CD Heart beat (Cleared in 100Mb mode)
const TSD_NCC: u32 = 0x0F00_0000; // Number of collisions counted (during transmission)
const TSD_EARTH: u32 = 0x003F_0000; // threshold to begin transmission (0 = 8bytes, 1->2^6 = * 32bytes)
const TSD_TOK: u32 = 1 << 15; // Transmission OK, successful
const TSD_TUN: u32 = 1 << 14; // Transmission FIFO underrun
const TSD_OWN: u32 = 1 << 13; // Tx DMA operation finished (driver must set to 0 when TBC is written)
const TSD_SIZE: u32 = 0x1fff; // Descriptor size, the total size in bytes of data to send (max 1792)
/// To set the RTL8139 to accept only the Transmit OK (TOK) and Receive OK (ROK)
/// interrupts, we would have the TOK and ROK bits of the IMR high and leave the
/// rest low. That way when a TOK or ROK IRQ happens, it actually will go through
/// and fire up an IRQ.
const INT_MASK: u16 = ISR_ROK | ISR_TOK | ISR_RXOVW | ISR_TER | ISR_RER;
/// Beside Receive OK (ROK) interrupt, this mask enable all other interrupts
const INT_MASK_NO_ROK: u16 = ISR_TOK | ISR_RXOVW | ISR_TER | ISR_RER;
const NO_TX_BUFFERS: usize = 4;
#[derive(Debug)]
pub enum RTL8139Error {
InitFailed,
ResetFailed,
Unknown,
}
/// RealTek RTL8139 network driver struct.
///
/// Struct allows to control device queues as also
/// the device itself.
pub struct RTL8139Driver {
iobase: u16,
mtu: u16,
irq: u8,
mac: [u8; 6],
tx_in_use: [bool; NO_TX_BUFFERS],
tx_counter: usize,
rxbuffer: VirtAddr,
rxpos: usize,
txbuffer: VirtAddr,
}
impl NetworkInterface for RTL8139Driver {
/// Returns the MAC address of the network interface
fn get_mac_address(&self) -> [u8; 6] {
self.mac
}
/// Returns the current MTU of the device.
fn get_mtu(&self) -> u16 {
self.mtu
}
fn get_tx_buffer(&mut self, len: usize) -> Result<(*mut u8, usize), ()> {
let id = self.tx_counter % NO_TX_BUFFERS;
if self.tx_in_use[id] || len > TX_BUF_LEN {
error!("Unable to get TX buffer");
Err(())
} else {
self.tx_in_use[id] = true;
self.tx_counter += 1;
Ok(((self.txbuffer.as_usize() + id * TX_BUF_LEN) as *mut u8, id))
}
}
fn free_tx_buffer(&self, _token: usize) {
// get_tx_buffer did not allocate
}
fn send_tx_buffer(&mut self, id: usize, len: usize) -> Result<(), ()> {
// send the packet
unsafe {
outl(
self.iobase + TSD0 as u16 + (4 * id as u16),
len.try_into().unwrap(),
); //|0x3A0000);
}
Ok(())
}
fn has_packet(&self) -> bool {
let cmd = unsafe { inb(self.iobase + CR as u16) };
if (cmd & CR_BUFE) != CR_BUFE {
let header: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
if header & ISR_ROK == ISR_ROK {
return true;
}
}
false
}
fn receive_rx_buffer(&mut self) -> Result<(&'static [u8], usize), ()> {
let cmd = unsafe { inb(self.iobase + CR as u16) };
if (cmd & CR_BUFE) != CR_BUFE {
let header: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
self.rxpos = (self.rxpos + mem::size_of::<u16>()) % RX_BUF_LEN;
if header & ISR_ROK == ISR_ROK {
let length: u16 =
unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) } - 4; // copy packet (but not the CRC)
Ok((
unsafe {
core::slice::from_raw_parts(
(self.rxbuffer.as_usize() + self.rxpos + mem::size_of::<u16>())
as *const u8,
length as usize,
)
},
self.rxpos,
))
} else {
error!(
"RTL8192: invalid header {:#x}, rx_pos {}\n",
header, self.rxpos
);
Err(())
}
} else {
Err(())
}
}
// Tells driver, that buffer is consumed and can be deallocated
fn rx_buffer_consumed(&mut self, handle: usize) {
if self.rxpos != handle {
warn!("Invalid handle {} != {}", self.rxpos, handle)
}
let length: u16 = unsafe { *((self.rxbuffer.as_usize() + self.rxpos) as *const u16) };
self.rxpos = (self.rxpos + length as usize + mem::size_of::<u16>()) % RX_BUF_LEN;
// packets are dword aligned
self.rxpos = ((self.rxpos + 3) & !0x3) % RX_BUF_LEN;
unsafe {
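            // CAPR is written 0x10 below the actual read position; the
            // controller adds the 16-byte offset internally (note the
            // documented initial value 0xFFF0 above).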
outw(self.iobase + CAPR, (self.rxpos - 0x10).try_into().unwrap());
}
}
fn set_polling_mode(&mut self, value: bool) {
if value {
// disable interrupts from the NIC
unsafe {
outw(self.iobase + IMR, INT_MASK_NO_ROK);
}
} else {
// Enable all known interrupts by setting the interrupt mask.
unsafe {
outw(self.iobase + IMR, INT_MASK);
}
}
}
fn handle_interrupt(&mut self) -> bool {
increment_irq_counter((32 + self.irq).into());
let isr_contents = unsafe { inw(self.iobase + ISR) };
if (isr_contents & ISR_TOK) == ISR_TOK {
self.tx_handler();
}
if (isr_contents & ISR_RER) == ISR_RER {
error!("RTL88139: RX error detected!\n");
}
if (isr_contents & ISR_TER) == ISR_TER {
error!("RTL88139r: TX error detected!\n");
}
if (isr_contents & ISR_RXOVW) == ISR_RXOVW {
error!("RTL88139: RX overflow detected!\n");
}
let ret = (isr_contents & ISR_ROK) == ISR_ROK;
if ret {
// handle incoming packets
#[cfg(not(feature = "newlib"))]
netwakeup();
}
unsafe {
outw(
self.iobase + ISR,
isr_contents & (ISR_RXOVW | ISR_TER | ISR_RER | ISR_TOK | ISR_ROK),
);
}
ret
}
}
impl RTL8139Driver {
fn tx_handler(&mut self) {
for i in 0..self.tx_in_use.len() {
if self.tx_in_use[i] {
let txstatus = unsafe { inl(self.iobase + TSD0 + i as u16 * 4) };
if (txstatus & (TSD_TABT | TSD_OWC)) > 0 {
error!("RTL8139: major error");
continue;
}
if (txstatus & TSD_TUN) == TSD_TUN {
error!("RTL8139: transmit underrun");
}
if (txstatus & TSD_TOK) == TSD_TOK {
self.tx_in_use[i] = false;
}
}
}
}
}
impl Drop for RTL8139Driver {
fn drop(&mut self) {
debug!("Dropping RTL8129Driver!");
// Software reset
unsafe {
outb(self.iobase + CR, CR_RST);
}
crate::mm::deallocate(self.rxbuffer, RX_BUF_LEN);
crate::mm::deallocate(self.txbuffer, NO_TX_BUFFERS * TX_BUF_LEN);
}
}
pub fn init_device(adapter: &pci::PciAdapter) -> Result<RTL8139Driver, DriverError> {
let mut iter = adapter.base_addresses.iter().filter_map(|&x| match x {
pci::PciBar::IO(base) => Some(base.addr),
_ => None,
});
let iobase: u16 = iter
.next()
.ok_or(DriverError::InitRTL8139DevFail(RTL8139Error::Unknown))?
.try_into()
.unwrap();
debug!(
"Found RTL8139 at iobase {:#x} (irq {})",
iobase, adapter.irq
);
adapter.make_bus_master();
let mac: [u8; 6] = unsafe {
[
inb(iobase + IDR0),
inb(iobase + IDR0 + 1),
inb(iobase + IDR0 + 2),
inb(iobase + IDR0 + 3),
inb(iobase + IDR0 + 4),
inb(iobase + IDR0 + 5),
]
};
debug!(
"MAC address {:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]
);
unsafe {
if inl(iobase + TCR) == 0x00FF_FFFFu32 {
error!("Unable to initialize RTL8192");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::InitFailed));
}
// Software reset
outb(iobase + CR, CR_RST);
// The RST bit must be checked to make sure that the chip has finished the reset.
// If the RST bit is high (1), then the reset is still in operation.
crate::arch::kernel::processor::udelay(10000);
let mut tmp: u16 = 10000;
while (inb(iobase + CR) & CR_RST) == CR_RST && tmp > 0 {
tmp -= 1;
}
if tmp == 0 {
error!("RTL8139 reset failed");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::ResetFailed));
}
// Enable Receive and Transmitter
outb(iobase + CR, CR_TE | CR_RE); // Sets the RE and TE bits high
// lock config register
outb(iobase + CR9346, CR9346_EEM1 | CR9346_EEM0);
// clear all of CONFIG1
outb(iobase + CONFIG1, 0);
// disable driver loaded and lanwake bits, turn driver loaded bit back on
outb(
iobase + CONFIG1,
(inb(iobase + CONFIG1) & !(CONFIG1_DVRLOAD | CONFIG1_LWACT)) | CONFIG1_DVRLOAD,
);
// unlock config register
outb(iobase + CR9346, 0);
/*
* configure receive buffer
* AB - Accept Broadcast: Accept broadcast packets sent to mac ff:ff:ff:ff:ff:ff
* AM - Accept Multicast: Accept multicast packets.
* APM - Accept Physical Match: Accept packets send to NIC's MAC address.
* AAP - Accept All Packets. Accept all packets (run in promiscuous mode).
*/
outl(
iobase + RCR,
RCR_MXDMA2 | RCR_MXDMA1 | RCR_MXDMA0 | RCR_AB | RCR_AM | RCR_APM | RCR_AAP,
); // The WRAP bit isn't set!
// set the transmit config register to
// be the normal interframe gap time
// set DMA max burst to 64bytes
outl(iobase + TCR, TCR_IFG | TCR_MXDMA0 | TCR_MXDMA1 | TCR_MXDMA2);
}
let rxbuffer = crate::mm::allocate(RX_BUF_LEN, true);
let txbuffer = crate::mm::allocate(NO_TX_BUFFERS * TX_BUF_LEN, true);
if txbuffer.is_zero() || rxbuffer.is_zero() {
error!("Unable to allocate buffers for RTL8139");
return Err(DriverError::InitRTL8139DevFail(RTL8139Error::Unknown));
}
debug!(
"Allocate TxBuffer at {:#x} and RxBuffer at {:#x}",
txbuffer, rxbuffer
);
unsafe {
// register the receive buffer
outl(
iobase + RBSTART,
virt_to_phys(rxbuffer).as_u64().try_into().unwrap(),
);
// set each of the transmitter start address descriptors
outl(
iobase + TSAD0,
virt_to_phys(txbuffer).as_u64().try_into().unwrap(),
);
outl(
iobase + TSAD1,
virt_to_phys(txbuffer + TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
outl(
iobase + TSAD2,
virt_to_phys(txbuffer + 2 * TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
outl(
iobase + TSAD3,
virt_to_phys(txbuffer + 3 * TX_BUF_LEN)
.as_u64()
.try_into()
.unwrap(),
);
// Enable all known interrupts by setting the interrupt mask.
outw(iobase + IMR, INT_MASK);
outw(iobase + BMCR, BMCR_ANE);
        let tmp = inw(iobase + BMCR);
        let speed = if tmp & BMCR_SPD1000 == BMCR_SPD1000 {
            1000
        } else if tmp & BMCR_SPD100 == BMCR_SPD100 {
            100
        } else {
            10
        };
// Enable Receive and Transmitter
outb(iobase + CR, CR_TE | CR_RE); // Sets the RE and TE bits high
info!(
"RTL8139: CR = {:#x}, ISR = {:#x}, speed = {} mbps",
inb(iobase + CR),
inw(iobase + ISR),
speed
);
}
// Install interrupt handler for RTL8139
debug!("Install interrupt handler for RTL8139 at {}", adapter.irq);
irq_install_handler(adapter.irq.into(), network_irqhandler as usize);
add_irq_name(adapter.irq as u32, "rtl8139_net");
Ok(RTL8139Driver {
iobase,
mtu: 1500,
irq: adapter.irq,
mac,
tx_in_use: [false; NO_TX_BUFFERS],
tx_counter: 0,
rxbuffer,
rxpos: 0,
txbuffer,
})
}
| 30.319113 | 128 | 0.669725 |
ccc66ffdc78f590512f880d95bf3782f81dbafa6 | 1,930 | use crate::plugins::req::get_url;
use teloxide::prelude::*;
use teloxide::requests::ResponseResult;
use teloxide::types::{
InlineKeyboardButton, InlineKeyboardButtonKind, InlineKeyboardMarkup, ReplyMarkup,
};
pub async fn perocmd_paste(
cx: UpdateWithCx<AutoSend<Bot>, Message>,
cntnt: String,
) -> ResponseResult<Message> {
if let Some(reply) = cx.update.reply_to_message() {
let url = format!(
"https://api.itayki.com/paste?content={}",
reply.text().unwrap()
);
let response = get_url(url.to_string()).await;
let pasteu = &response.clone().unwrap()["paste_url"]
.to_string()
.trim_matches('"')
.to_string();
        cx.reply_to("I have pasted this text!")
.reply_markup(ReplyMarkup::InlineKeyboard(InlineKeyboardMarkup::new(
vec![vec![InlineKeyboardButton::new(
"Paste URL",
InlineKeyboardButtonKind::Url(pasteu.into()),
)]],
)))
.await
} else {
if cntnt.is_empty() {
cx.reply_to("Please provide some text or reply to a message to paste it!")
.await
} else {
let url = format!("https://api.itayki.com/paste?content={}", cntnt);
let response = get_url(url.to_string()).await;
let pasteu = &response.clone().unwrap()["paste_url"]
.to_string()
.trim_matches('"')
.to_string();
            cx.reply_to("I have pasted this text!")
.reply_markup(ReplyMarkup::InlineKeyboard(InlineKeyboardMarkup::new(
vec![vec![InlineKeyboardButton::new(
"Paste URL",
InlineKeyboardButtonKind::Url(pasteu.into()),
)]],
)))
.await
}
}
}
| 37.115385 | 86 | 0.534197 |
1cb8eb80753944f3fc40e50c632de0a4af564d14 | 21,697 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::collections::BTreeMap;
use serde_derive::{Deserialize, Serialize};
use serde_json::{json, Value};
use crate::error::{ArrowError, Result};
use super::DataType;
/// Contains the meta-data for a single relative type.
///
/// The `Schema` object is an ordered collection of `Field` objects.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Field {
name: String,
data_type: DataType,
nullable: bool,
dict_id: i64,
dict_is_ordered: bool,
/// A map of key-value pairs containing additional custom meta data.
#[serde(skip_serializing_if = "Option::is_none")]
metadata: Option<BTreeMap<String, String>>,
}
impl Field {
/// Creates a new field
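    ///
    /// # Example
    ///
    /// ```
    /// use arrow::datatypes::{DataType, Field};
    ///
    /// let field = Field::new("c1", DataType::Int64, false);
    /// assert_eq!(field.name(), "c1");
    /// assert!(!field.is_nullable());
    /// ```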
pub fn new(name: &str, data_type: DataType, nullable: bool) -> Self {
Field {
name: name.to_string(),
data_type,
nullable,
dict_id: 0,
dict_is_ordered: false,
metadata: None,
}
}
/// Creates a new field
pub fn new_dict(
name: &str,
data_type: DataType,
nullable: bool,
dict_id: i64,
dict_is_ordered: bool,
) -> Self {
Field {
name: name.to_string(),
data_type,
nullable,
dict_id,
dict_is_ordered,
metadata: None,
}
}
/// Sets the `Field`'s optional custom metadata.
    /// An empty map is stored as `None`.
#[inline]
pub fn set_metadata(&mut self, metadata: Option<BTreeMap<String, String>>) {
// To make serde happy, convert Some(empty_map) to None.
self.metadata = None;
if let Some(v) = metadata {
if !v.is_empty() {
self.metadata = Some(v);
}
}
}
/// Returns the immutable reference to the `Field`'s optional custom metadata.
#[inline]
pub const fn metadata(&self) -> &Option<BTreeMap<String, String>> {
&self.metadata
}
/// Returns an immutable reference to the `Field`'s name.
#[inline]
pub const fn name(&self) -> &String {
&self.name
}
/// Returns an immutable reference to the `Field`'s data-type.
#[inline]
pub const fn data_type(&self) -> &DataType {
&self.data_type
}
/// Indicates whether this `Field` supports null values.
#[inline]
pub const fn is_nullable(&self) -> bool {
self.nullable
}
/// Returns the dictionary ID, if this is a dictionary type.
#[inline]
pub const fn dict_id(&self) -> Option<i64> {
match self.data_type {
DataType::Dictionary(_, _) => Some(self.dict_id),
_ => None,
}
}
/// Returns whether this `Field`'s dictionary is ordered, if this is a dictionary type.
#[inline]
pub const fn dict_is_ordered(&self) -> Option<bool> {
match self.data_type {
DataType::Dictionary(_, _) => Some(self.dict_is_ordered),
_ => None,
}
}
/// Parse a `Field` definition from a JSON representation.
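    ///
    /// A sketch of the accepted shape (the `"type"` object follows the Arrow
    /// integration-JSON format):
    /// ```ignore
    /// let json = serde_json::json!({
    ///     "name": "c1",
    ///     "nullable": false,
    ///     "type": { "name": "int", "bitWidth": 64, "isSigned": true },
    ///     "children": []
    /// });
    /// let field = Field::from(&json)?;
    /// ```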
pub fn from(json: &Value) -> Result<Self> {
match *json {
Value::Object(ref map) => {
let name = match map.get("name") {
Some(&Value::String(ref name)) => name.to_string(),
_ => {
return Err(ArrowError::ParseError(
"Field missing 'name' attribute".to_string(),
));
}
};
let nullable = match map.get("nullable") {
Some(&Value::Bool(b)) => b,
_ => {
return Err(ArrowError::ParseError(
"Field missing 'nullable' attribute".to_string(),
));
}
};
let data_type = match map.get("type") {
Some(t) => DataType::from(t)?,
_ => {
return Err(ArrowError::ParseError(
"Field missing 'type' attribute".to_string(),
));
}
};
// Referenced example file: testing/data/arrow-ipc-stream/integration/1.0.0-littleendian/generated_custom_metadata.json.gz
let metadata = match map.get("metadata") {
Some(&Value::Array(ref values)) => {
let mut res: BTreeMap<String, String> = BTreeMap::new();
for value in values {
match value.as_object() {
Some(map) => {
if map.len() != 2 {
return Err(ArrowError::ParseError(
"Field 'metadata' must have exact two entries for each key-value map".to_string(),
));
}
if let (Some(k), Some(v)) =
(map.get("key"), map.get("value"))
{
if let (Some(k_str), Some(v_str)) =
(k.as_str(), v.as_str())
{
res.insert(
k_str.to_string().clone(),
v_str.to_string().clone(),
);
} else {
return Err(ArrowError::ParseError("Field 'metadata' must have map value of string type".to_string()));
}
} else {
return Err(ArrowError::ParseError("Field 'metadata' lacks map keys named \"key\" or \"value\"".to_string()));
}
}
_ => {
return Err(ArrowError::ParseError(
"Field 'metadata' contains non-object key-value pair".to_string(),
));
}
}
}
Some(res)
}
// We also support map format, because Schema's metadata supports this.
// See https://github.com/apache/arrow/pull/5907
Some(&Value::Object(ref values)) => {
let mut res: BTreeMap<String, String> = BTreeMap::new();
for (k, v) in values {
if let Some(str_value) = v.as_str() {
res.insert(k.clone(), str_value.to_string().clone());
} else {
return Err(ArrowError::ParseError(
format!("Field 'metadata' contains non-string value for key {}", k),
));
}
}
Some(res)
}
Some(_) => {
return Err(ArrowError::ParseError(
"Field `metadata` is not json array".to_string(),
));
}
_ => None,
};
// if data_type is a struct or list, get its children
let data_type = match data_type {
DataType::List(_)
| DataType::LargeList(_)
| DataType::FixedSizeList(_, _) => match map.get("children") {
Some(Value::Array(values)) => {
if values.len() != 1 {
return Err(ArrowError::ParseError(
"Field 'children' must have one element for a list data type".to_string(),
));
}
match data_type {
DataType::List(_) => {
DataType::List(Box::new(Self::from(&values[0])?))
}
DataType::LargeList(_) => {
DataType::LargeList(Box::new(Self::from(&values[0])?))
}
DataType::FixedSizeList(_, int) => DataType::FixedSizeList(
Box::new(Self::from(&values[0])?),
int,
),
_ => unreachable!(
"Data type should be a list, largelist or fixedsizelist"
),
}
}
Some(_) => {
return Err(ArrowError::ParseError(
"Field 'children' must be an array".to_string(),
))
}
None => {
return Err(ArrowError::ParseError(
"Field missing 'children' attribute".to_string(),
));
}
},
DataType::Struct(mut fields) => match map.get("children") {
Some(Value::Array(values)) => {
let struct_fields: Result<Vec<Field>> =
values.iter().map(|v| Field::from(v)).collect();
fields.append(&mut struct_fields?);
DataType::Struct(fields)
}
Some(_) => {
return Err(ArrowError::ParseError(
"Field 'children' must be an array".to_string(),
))
}
None => {
return Err(ArrowError::ParseError(
"Field missing 'children' attribute".to_string(),
));
}
},
_ => data_type,
};
let mut dict_id = 0;
let mut dict_is_ordered = false;
let data_type = match map.get("dictionary") {
Some(dictionary) => {
let index_type = match dictionary.get("indexType") {
Some(t) => DataType::from(t)?,
_ => {
return Err(ArrowError::ParseError(
"Field missing 'indexType' attribute".to_string(),
));
}
};
dict_id = match dictionary.get("id") {
Some(Value::Number(n)) => n.as_i64().unwrap(),
_ => {
return Err(ArrowError::ParseError(
"Field missing 'id' attribute".to_string(),
));
}
};
dict_is_ordered = match dictionary.get("isOrdered") {
Some(&Value::Bool(n)) => n,
_ => {
return Err(ArrowError::ParseError(
"Field missing 'isOrdered' attribute".to_string(),
));
}
};
DataType::Dictionary(Box::new(index_type), Box::new(data_type))
}
_ => data_type,
};
Ok(Field {
name,
data_type,
nullable,
dict_id,
dict_is_ordered,
metadata,
})
}
_ => Err(ArrowError::ParseError(
"Invalid json value type for field".to_string(),
)),
}
}
/// Generate a JSON representation of the `Field`.
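    ///
    /// For example, `Field::new("c1", DataType::Int64, false).to_json()`
    /// yields a JSON object of the shape
    /// `{"name": "c1", "nullable": false, "type": {...}, "children": []}`.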
pub fn to_json(&self) -> Value {
let children: Vec<Value> = match self.data_type() {
DataType::Struct(fields) => fields.iter().map(|f| f.to_json()).collect(),
DataType::List(field) => vec![field.to_json()],
DataType::LargeList(field) => vec![field.to_json()],
DataType::FixedSizeList(field, _) => vec![field.to_json()],
_ => vec![],
};
match self.data_type() {
DataType::Dictionary(ref index_type, ref value_type) => json!({
"name": self.name,
"nullable": self.nullable,
"type": value_type.to_json(),
"children": children,
"dictionary": {
"id": self.dict_id,
"indexType": index_type.to_json(),
"isOrdered": self.dict_is_ordered
}
}),
_ => json!({
"name": self.name,
"nullable": self.nullable,
"type": self.data_type.to_json(),
"children": children
}),
}
}
/// Merge field into self if it is compatible. Struct will be merged recursively.
    /// NOTE: `self` may be left in an unexpected state if the merge fails.
///
/// Example:
///
/// ```
/// use arrow::datatypes::*;
///
/// let mut field = Field::new("c1", DataType::Int64, false);
/// assert!(field.try_merge(&Field::new("c1", DataType::Int64, true)).is_ok());
/// assert!(field.is_nullable());
/// ```
pub fn try_merge(&mut self, from: &Field) -> Result<()> {
// merge metadata
match (self.metadata(), from.metadata()) {
(Some(self_metadata), Some(from_metadata)) => {
let mut merged = self_metadata.clone();
for (key, from_value) in from_metadata {
if let Some(self_value) = self_metadata.get(key) {
if self_value != from_value {
return Err(ArrowError::SchemaError(format!(
"Fail to merge field due to conflicting metadata data value for key {}", key),
));
}
} else {
merged.insert(key.clone(), from_value.clone());
}
}
self.set_metadata(Some(merged));
}
(None, Some(from_metadata)) => {
self.set_metadata(Some(from_metadata.clone()));
}
_ => {}
}
if from.dict_id != self.dict_id {
return Err(ArrowError::SchemaError(
"Fail to merge schema Field due to conflicting dict_id".to_string(),
));
}
if from.dict_is_ordered != self.dict_is_ordered {
return Err(ArrowError::SchemaError(
"Fail to merge schema Field due to conflicting dict_is_ordered"
.to_string(),
));
}
match &mut self.data_type {
DataType::Struct(nested_fields) => match &from.data_type {
DataType::Struct(from_nested_fields) => {
for from_field in from_nested_fields {
let mut is_new_field = true;
for self_field in nested_fields.iter_mut() {
if self_field.name != from_field.name {
continue;
}
is_new_field = false;
self_field.try_merge(from_field)?;
}
if is_new_field {
nested_fields.push(from_field.clone());
}
}
}
_ => {
return Err(ArrowError::SchemaError(
"Fail to merge schema Field due to conflicting datatype"
.to_string(),
));
}
},
DataType::Union(nested_fields) => match &from.data_type {
DataType::Union(from_nested_fields) => {
for from_field in from_nested_fields {
let mut is_new_field = true;
for self_field in nested_fields.iter_mut() {
if from_field == self_field {
is_new_field = false;
break;
}
}
if is_new_field {
nested_fields.push(from_field.clone());
}
}
}
_ => {
return Err(ArrowError::SchemaError(
"Fail to merge schema Field due to conflicting datatype"
.to_string(),
));
}
},
DataType::Null
| DataType::Boolean
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float16
| DataType::Float32
| DataType::Float64
| DataType::Timestamp(_, _)
| DataType::Date32
| DataType::Date64
| DataType::Time32(_)
| DataType::Time64(_)
| DataType::Duration(_)
| DataType::Binary
| DataType::LargeBinary
| DataType::Interval(_)
| DataType::LargeList(_)
| DataType::List(_)
| DataType::Dictionary(_, _)
| DataType::FixedSizeList(_, _)
| DataType::FixedSizeBinary(_)
| DataType::Utf8
| DataType::LargeUtf8
| DataType::Decimal(_, _) => {
if self.data_type != from.data_type {
return Err(ArrowError::SchemaError(
"Fail to merge schema Field due to conflicting datatype"
.to_string(),
));
}
}
}
if from.nullable {
self.nullable = from.nullable;
}
Ok(())
}
/// Check to see if `self` is a superset of `other` field. Superset is defined as:
///
/// * if nullability doesn't match, self needs to be nullable
/// * self.metadata is a superset of other.metadata
/// * all other fields are equal
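    ///
    /// # Example
    ///
    /// ```
    /// use arrow::datatypes::{DataType, Field};
    ///
    /// let nullable = Field::new("c1", DataType::Int64, true);
    /// let non_nullable = Field::new("c1", DataType::Int64, false);
    /// // A nullable field can hold everything the non-nullable one can:
    /// assert!(nullable.contains(&non_nullable));
    /// assert!(!non_nullable.contains(&nullable));
    /// ```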
pub fn contains(&self, other: &Field) -> bool {
if self.name != other.name
|| self.data_type != other.data_type
|| self.dict_id != other.dict_id
|| self.dict_is_ordered != other.dict_is_ordered
{
return false;
}
if self.nullable != other.nullable && !self.nullable {
return false;
}
// make sure self.metadata is a superset of other.metadata
match (&self.metadata, &other.metadata) {
(None, Some(_)) => {
return false;
}
(Some(self_meta), Some(other_meta)) => {
for (k, v) in other_meta.iter() {
match self_meta.get(k) {
Some(s) => {
if s != v {
return false;
}
}
None => {
return false;
}
}
}
}
_ => {}
}
true
}
}
// TODO: improve display with crate https://crates.io/crates/derive_more ?
impl std::fmt::Display for Field {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
| 40.031365 | 149 | 0.417846 |
8a8b584b75c499c82d33dcd590a1769bc6cf3a36 | 714 | extern crate breadboard;
extern crate env_logger;
extern crate hyper;
extern crate serde_json;
use breadboard::Breadboard;
use hyper::rt::Future;
use hyper::server::Server;
use hyper::{Request, Response};
use serde_json::Value;
use std::string::ParseError; // waiting for `!`
fn echo(request: Request<Value>) -> Result<Response<Value>, ParseError> {
Ok(Response::new(request.into_body()))
}
fn main() {
env_logger::init();
let board = Breadboard::new().post("/", echo);
let server = Server::bind(&"127.0.0.1:3000".parse().unwrap())
.serve(board)
.map_err(|e| eprintln!("server error: {}", e));
eprintln!("Listening on http://127.0.0.1:3000/");
hyper::rt::run(server);
}
| 26.444444 | 73 | 0.658263 |
5636f8bee8c64232cb416929af88319b0411d55a | 1,471 | use std::mem;
use crate::pic::Thunkable;
#[repr(packed)]
struct CallAbs {
// call [rip+8]
opcode0: u8,
opcode1: u8,
dummy0: u32,
// jmp +10
dummy1: u8,
dummy2: u8,
// destination
address: usize,
}
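/// Builds a 16-byte absolute-call thunk for x86-64: `call [rip+2]` reads the
/// 8-byte `destination` embedded at the end of the thunk, and the short jump
/// that follows skips over that literal so execution resumes past the thunk.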
pub fn call_abs(destination: usize) -> Box<dyn Thunkable> {
let code = CallAbs {
opcode0: 0xFF,
opcode1: 0x15,
dummy0: 0x0_0000_0002,
dummy1: 0xEB,
dummy2: 0x08,
address: destination,
};
let slice: [u8; 16] = unsafe { mem::transmute(code) };
Box::new(slice.to_vec())
}
#[repr(packed)]
struct JumpAbs {
// jmp +6
opcode0: u8,
opcode1: u8,
dummy0: u32,
// destination
address: usize,
}
pub fn jmp_abs(destination: usize) -> Box<dyn Thunkable> {
let code = JumpAbs {
opcode0: 0xFF,
opcode1: 0x25,
dummy0: 0x0_0000_0000,
address: destination,
};
let slice: [u8; 14] = unsafe { mem::transmute(code) };
Box::new(slice.to_vec())
}
#[repr(packed)]
struct JccAbs {
// jxx + 16
opcode: u8,
dummy0: u8,
dummy1: u8,
dummy2: u8,
dummy3: u32,
// destination
address: usize,
}
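/// Builds a conditional absolute jump: the condition is inverted so a short
/// `Jcc` can hop over the embedded `jmp [rip]` and its 8-byte destination
/// whenever the original condition does not hold.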
pub fn jcc_abs(destination: usize, condition: u8) -> Box<dyn Thunkable> {
let code = JccAbs {
// Invert the condition in x64 mode to simplify the conditional jump logic
opcode: 0x71 ^ condition,
dummy0: 0x0E,
dummy1: 0xFF,
dummy2: 0x25,
dummy3: 0x0000_0000,
address: destination,
};
let slice: [u8; 16] = unsafe { mem::transmute(code) };
Box::new(slice.to_vec())
}
| 18.620253 | 78 | 0.622706 |
ff6e74bedcaeb6f8dc7e2f619c07f31bb9766671 | 1,626 | // This is a test code to run benchmark
// Based on basic example in examples/basic.rs
// 1. Run prove() on b"sample"
// 2. Run prove() on random bytes, length from 10 to 16384
// 3. Run verify() on proof of b"sample"
#![feature(test)]
extern crate test;
use test::Bencher;
use vrf::openssl::{CipherSuite, ECVRF};
use vrf::VRF;
const SUITE: CipherSuite = CipherSuite::P256_SHA256_SWU;
#[bench]
fn bench_prove(b: &mut Bencher) {
let mut vrf = ECVRF::from_suite(SUITE).unwrap();
let secret_key =
hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721").unwrap();
let message: &[u8] = b"sample";
b.iter(|| {
vrf.prove(&secret_key, &message).unwrap();
});
}
#[bench]
fn bench_prove_rand_message(b: &mut Bencher) {
let mut vrf = ECVRF::from_suite(SUITE).unwrap();
let secret_key =
hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721").unwrap();
let message_len = rand::random::<usize>() % 16375 + 10;
let message: Vec<u8> = (0..message_len).map(|_| rand::random::<u8>()).collect();
b.iter(|| {
vrf.prove(&secret_key, &message).unwrap();
});
}
#[bench]
fn bench_verify(b: &mut Bencher) {
let mut vrf = ECVRF::from_suite(SUITE).unwrap();
let secret_key =
hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721").unwrap();
let public_key = vrf.derive_public_key(&secret_key).unwrap();
let message: &[u8] = b"sample";
let pi = vrf.prove(&secret_key, &message).unwrap();
b.iter(|| {
vrf.verify(&public_key, &pi, &message).unwrap();
});
}
| 31.882353 | 97 | 0.662362 |
f8ce28c15e6f48740c8dc5002b59fe98111252ae | 3,789 | #![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct HasPrivate {
pub mNotPrivate: ::std::os::raw::c_int,
///```text
/// <div rustbindgen private></div>
///```
mIsPrivate: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_HasPrivate() {
assert_eq!(
::std::mem::size_of::<HasPrivate>(),
8usize,
concat!("Size of: ", stringify!(HasPrivate))
);
assert_eq!(
::std::mem::align_of::<HasPrivate>(),
4usize,
concat!("Alignment of ", stringify!(HasPrivate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<HasPrivate>())).mNotPrivate as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(HasPrivate),
"::",
stringify!(mNotPrivate)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<HasPrivate>())).mIsPrivate as *const _
as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(HasPrivate),
"::",
stringify!(mIsPrivate)
)
);
}
///```text
/// <div rustbindgen private></div>
///```
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct VeryPrivate {
mIsPrivate: ::std::os::raw::c_int,
mIsAlsoPrivate: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_VeryPrivate() {
assert_eq!(
::std::mem::size_of::<VeryPrivate>(),
8usize,
concat!("Size of: ", stringify!(VeryPrivate))
);
assert_eq!(
::std::mem::align_of::<VeryPrivate>(),
4usize,
concat!("Alignment of ", stringify!(VeryPrivate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<VeryPrivate>())).mIsPrivate as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(VeryPrivate),
"::",
stringify!(mIsPrivate)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<VeryPrivate>())).mIsAlsoPrivate as *const _
as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(VeryPrivate),
"::",
stringify!(mIsAlsoPrivate)
)
);
}
///```text
/// <div rustbindgen private></div>
///```
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct ContradictPrivate {
///```text
/// <div rustbindgen private="false"></div>
///```
pub mNotPrivate: ::std::os::raw::c_int,
mIsPrivate: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_ContradictPrivate() {
assert_eq!(
::std::mem::size_of::<ContradictPrivate>(),
8usize,
concat!("Size of: ", stringify!(ContradictPrivate))
);
assert_eq!(
::std::mem::align_of::<ContradictPrivate>(),
4usize,
concat!("Alignment of ", stringify!(ContradictPrivate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContradictPrivate>())).mNotPrivate
as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(ContradictPrivate),
"::",
stringify!(mNotPrivate)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContradictPrivate>())).mIsPrivate as *const _
as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(ContradictPrivate),
"::",
stringify!(mIsPrivate)
)
);
}
| 24.445161 | 80 | 0.490895 |
675530ec3b10fbae7ca0903403bfe794ab46eee5 | 8,927 | use num::Zero;
use crate::allocator::Allocator;
use crate::{RealField, ComplexField};
use crate::storage::{Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, Dim, MatrixMN};
use crate::constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
// FIXME: this should be a trait on alga?
/// A trait for abstract matrix norms.
///
/// This may be moved to the alga crate in the future.
pub trait Norm<N: ComplexField> {
/// Apply this norm to the given matrix.
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C>;
/// Use the metric induced by this norm to compute the metric distance between the two given matrices.
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>;
}
/// Euclidean norm.
pub struct EuclideanNorm;
/// Lp norm.
pub struct LpNorm(pub i32);
/// L-infinite norm, a.k.a. Chebyshev norm, uniform norm, or supremum norm.
pub struct UniformNorm;
impl<N: ComplexField> Norm<N> for EuclideanNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
m.norm_squared().sqrt()
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
let diff = a - b;
acc + diff.modulus_squared()
}).sqrt()
}
}
impl<N: ComplexField> Norm<N> for LpNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
m.fold(N::RealField::zero(), |a, b| {
a + b.modulus().powi(self.0)
}).powf(crate::convert(1.0 / (self.0 as f64)))
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
let diff = a - b;
acc + diff.modulus().powi(self.0)
}).powf(crate::convert(1.0 / (self.0 as f64)))
}
}
impl<N: ComplexField> Norm<N> for UniformNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
// NOTE: we don't use `m.amax()` here because for the complex
// numbers this will return the max norm1 instead of the modulus.
m.fold(N::RealField::zero(), |acc, a| acc.max(a.modulus()))
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
let val = (a - b).modulus();
if val > acc {
val
} else {
acc
}
})
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The squared L2 norm of this vector.
#[inline]
pub fn norm_squared(&self) -> N::RealField {
let mut res = N::RealField::zero();
for i in 0..self.ncols() {
let col = self.column(i);
res += col.dotc(&col).real()
}
res
}
/// The L2 norm of this matrix.
///
/// Use `.apply_norm` to apply a custom norm.
#[inline]
pub fn norm(&self) -> N::RealField {
self.norm_squared().sqrt()
}
/// Compute the distance between `self` and `rhs` using the metric induced by the euclidean norm.
///
/// Use `.apply_metric_distance` to apply a custom norm.
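    ///
    /// # Example
    ///
    /// ```
    /// # use nalgebra::Vector2;
    /// let a = Vector2::new(0.0_f64, 0.0);
    /// let b = Vector2::new(3.0, 4.0);
    /// assert_eq!(a.metric_distance(&b), 5.0);
    /// ```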
#[inline]
pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::RealField
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
self.apply_metric_distance(rhs, &EuclideanNorm)
}
/// Uses the given `norm` to compute the norm of `self`.
///
/// # Example
///
/// ```
/// # use nalgebra::{Vector3, UniformNorm, LpNorm, EuclideanNorm};
///
/// let v = Vector3::new(1.0, 2.0, 3.0);
/// assert_eq!(v.apply_norm(&UniformNorm), 3.0);
/// assert_eq!(v.apply_norm(&LpNorm(1)), 6.0);
/// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm());
/// ```
#[inline]
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::RealField {
norm.norm(self)
}
/// Uses the metric induced by the given `norm` to compute the metric distance between `self` and `rhs`.
///
/// # Example
///
/// ```
/// # use nalgebra::{Vector3, UniformNorm, LpNorm, EuclideanNorm};
///
/// let v1 = Vector3::new(1.0, 2.0, 3.0);
/// let v2 = Vector3::new(10.0, 20.0, 30.0);
///
/// assert_eq!(v1.apply_metric_distance(&v2, &UniformNorm), 27.0);
/// assert_eq!(v1.apply_metric_distance(&v2, &LpNorm(1)), 27.0 + 18.0 + 9.0);
/// assert_eq!(v1.apply_metric_distance(&v2, &EuclideanNorm), (v1 - v2).norm());
/// ```
#[inline]
pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N::RealField
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
norm.metric_distance(self, rhs)
}
/// A synonym for the norm of this matrix.
///
/// Aka the length.
///
/// This function is simply implemented as a call to `norm()`
#[inline]
pub fn magnitude(&self) -> N::RealField {
self.norm()
}
/// A synonym for the squared norm of this matrix.
///
/// Aka the squared length.
///
/// This function is simply implemented as a call to `norm_squared()`
#[inline]
pub fn magnitude_squared(&self) -> N::RealField {
self.norm_squared()
}
/// Sets the magnitude of this vector unless it is smaller than `min_magnitude`.
///
/// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged.
    /// Otherwise this is equivalent to: `*self = self.normalize() * magnitude`.
#[inline]
pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField)
where S: StorageMut<N, R, C> {
let n = self.norm();
if n >= min_magnitude {
self.scale_mut(magnitude / n)
}
}
/// Returns a normalized version of this matrix.
#[inline]
#[must_use = "Did you mean to use normalize_mut()?"]
pub fn normalize(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
self.unscale(self.norm())
}
    /// Returns a normalized version of this matrix unless its norm is smaller than or equal to `min_norm`.
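    ///
    /// # Example
    ///
    /// ```
    /// # use nalgebra::Vector2;
    /// let v = Vector2::new(3.0_f64, 4.0);
    /// assert!(v.try_normalize(1.0e-6).is_some());
    /// assert!(Vector2::<f64>::zeros().try_normalize(1.0e-6).is_none());
    /// ```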
#[inline]
#[must_use = "Did you mean to use try_normalize_mut()?"]
pub fn try_normalize(&self, min_norm: N::RealField) -> Option<MatrixMN<N, R, C>>
where DefaultAllocator: Allocator<N, R, C> {
let n = self.norm();
if n <= min_norm {
None
} else {
Some(self.unscale(n))
}
}
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N::RealField {
self.apply_norm(&LpNorm(p))
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place and returns its norm.
#[inline]
pub fn normalize_mut(&mut self) -> N::RealField {
let n = self.norm();
self.unscale_mut(n);
n
}
    /// Normalizes this matrix in-place or does nothing if its norm is smaller than or equal to `min_norm`.
///
/// If the normalization succeeded, returns the old norm of this matrix.
#[inline]
pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option<N::RealField> {
let n = self.norm();
if n <= min_norm {
None
} else {
self.unscale_mut(n);
Some(n)
}
}
}
| 34.600775 | 125 | 0.564019 |
912424c98996ffeb4d0a2cd10a38fe9ea5b5b0cc | 4,606 | // Copyright 2019 The vault713 Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod api;
mod broker;
#[macro_use]
mod common;
mod contacts;
mod controller;
mod internal;
mod wallet;
use clap::{crate_version, App, Arg, ArgMatches};
use colored::*;
use common::config::Wallet713Config;
use common::{ErrorKind, Result, RuntimeMode};
use contacts::{AddressBook, Backend};
use controller::cli::CLI;
use grin_core::global::{set_mining_mode, ChainTypes};
use wallet::create_container;
fn do_config(
args: &ArgMatches,
chain: &Option<ChainTypes>,
silent: bool,
new_address_index: Option<u32>,
config_path: Option<&str>,
) -> Result<Wallet713Config> {
let mut config;
let mut any_matches = false;
let exists = Wallet713Config::exists(config_path, &chain)?;
if exists {
config = Wallet713Config::from_file(config_path, &chain)?;
} else {
config = Wallet713Config::default(&chain)?;
}
if let Some(data_path) = args.value_of("data-path") {
config.wallet713_data_path = data_path.to_string();
any_matches = true;
}
if let Some(domain) = args.value_of("domain") {
config.grinbox_domain = domain.to_string();
any_matches = true;
}
if let Some(port) = args.value_of("port") {
let port = u16::from_str_radix(port, 10).map_err(|_| ErrorKind::NumberParsingError)?;
config.grinbox_port = Some(port);
any_matches = true;
}
if let Some(node_uri) = args.value_of("node-uri") {
config.grin_node_uri = Some(node_uri.to_string());
any_matches = true;
}
if let Some(node_secret) = args.value_of("node-secret") {
config.grin_node_secret = Some(node_secret.to_string());
any_matches = true;
}
if new_address_index.is_some() {
config.grinbox_address_index = new_address_index;
any_matches = true;
}
config.to_file(config_path.map(|p| p.to_owned()))?;
if !any_matches && !silent {
cli_message!("{}", config);
}
Ok(config)
}
fn welcome(args: &ArgMatches, runtime_mode: &RuntimeMode) -> Result<Wallet713Config> {
let chain: Option<ChainTypes> = match args.is_present("floonet") {
true => Some(ChainTypes::Floonet),
false => Some(ChainTypes::Mainnet),
};
unsafe {
common::set_runtime_mode(runtime_mode);
};
let config = do_config(args, &chain, true, None, args.value_of("config-path"))?;
set_mining_mode(config.chain.clone().unwrap_or(ChainTypes::Mainnet));
Ok(config)
}
fn main() {
enable_ansi_support();
let matches = App::new("wallet713")
.version(crate_version!())
.arg(Arg::from_usage("[config-path] -c, --config=<config-path> 'the path to the config file'"))
.arg(Arg::from_usage("[log-config-path] -l, --log-config-path=<log-config-path> 'the path to the log config file'"))
.arg(Arg::from_usage("[account] -a, --account=<account> 'the account to use'"))
.arg(Arg::from_usage("[daemon] -d, --daemon 'run daemon'"))
.arg(Arg::from_usage("[floonet] -f, --floonet 'use floonet'"))
.get_matches();
let runtime_mode = match matches.is_present("daemon") {
true => RuntimeMode::Daemon,
false => RuntimeMode::Cli,
};
let config: Wallet713Config = welcome(&matches, &runtime_mode).unwrap_or_else(|e| {
panic!(
"{}: could not read or create config! {}",
"ERROR".bright_red(),
e
);
});
if runtime_mode == RuntimeMode::Daemon {
env_logger::init();
}
let data_path_buf = config.get_data_path().unwrap();
let data_path = data_path_buf.to_str().unwrap();
let address_book_backend =
Backend::new(data_path).expect("could not create address book backend!");
let address_book = AddressBook::new(Box::new(address_book_backend))
.expect("could not create an address book!");
let container = create_container(config, address_book).unwrap();
let cli = CLI::new(container);
cli.start();
press_any_key();
}
#[cfg(windows)]
pub fn enable_ansi_support() {
    if ansi_term::enable_ansi_support().is_err() {
colored::control::set_override(false);
}
}
#[cfg(not(windows))]
pub fn enable_ansi_support() {}
#[cfg(windows)]
pub fn press_any_key() {
dont_disappear::any_key_to_continue::default();
}
#[cfg(not(windows))]
pub fn press_any_key() {}
| 27.580838 | 124 | 0.700608 |
e4ae1994852b823cb8eeaab0cff14d9156f9d7fd | 53 | pub use ark_bn254::{Fr as Fq, FrConfig as FqConfig};
| 26.5 | 52 | 0.735849 |
69a4c59d5c6521c41aab99dce4526c3a7dd9f378 | 10,019 | mod error;
use dashmap::DashMap;
use error::{ChunkingRequest, InvalidPath, RequestError, RequestIssue};
use http::{request::Parts, Method as HttpMethod};
use hyper::{
body::Body,
server::{conn::AddrStream, Server},
service, Request, Response,
};
use snafu::ResultExt;
use std::{
convert::TryFrom,
env,
error::Error,
net::{IpAddr, SocketAddr},
str::FromStr,
sync::Arc,
};
use tracing::{debug, error, info, trace};
use tracing_log::LogTracer;
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
use twilight_http::{
client::Client,
request::{Method, Request as TwilightRequest},
routing::Path,
API_VERSION,
};
#[cfg(feature = "expose-metrics")]
use std::{future::Future, pin::Pin, sync::Arc, time::Instant};
#[cfg(feature = "expose-metrics")]
use lazy_static::lazy_static;
#[cfg(feature = "expose-metrics")]
use metrics::histogram;
#[cfg(feature = "expose-metrics")]
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
#[cfg(feature = "expose-metrics")]
lazy_static! {
static ref METRIC_KEY: String =
env::var("METRIC_KEY").unwrap_or_else(|_| "twilight_http_proxy".into());
}
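/// Maps an `Authorization` token (or `None` for unauthenticated requests) to a
/// cached twilight HTTP client, so ratelimit state is reused per token.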
type ClientMap = Arc<DashMap<Option<String>, Client>>;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
LogTracer::init()?;
let log_filter_layer =
EnvFilter::try_from_default_env().or_else(|_| EnvFilter::try_new("info"))?;
let log_fmt_layer = fmt::layer();
let log_subscriber = tracing_subscriber::registry()
.with(log_filter_layer)
.with(log_fmt_layer);
tracing::subscriber::set_global_default(log_subscriber)?;
let host_raw = env::var("HOST").unwrap_or_else(|_| "0.0.0.0".into());
let host = IpAddr::from_str(&host_raw)?;
let port = env::var("PORT").unwrap_or_else(|_| "80".into()).parse()?;
let clients: ClientMap = Arc::new(DashMap::new());
let address = SocketAddr::from((host, port));
#[cfg(feature = "expose-metrics")]
let handle: Arc<PrometheusHandle>;
#[cfg(feature = "expose-metrics")]
{
let recorder = PrometheusBuilder::new().build();
handle = Arc::new(recorder.handle());
metrics::set_boxed_recorder(Box::new(recorder))
.expect("Failed to create metrics receiver!");
}
// The closure inside `make_service_fn` is run for each connection,
// creating a 'service' to handle requests for that specific connection.
let service = service::make_service_fn(move |addr: &AddrStream| {
debug!("Connection from: {:?}", addr);
let clients = clients.clone();
#[cfg(feature = "expose-metrics")]
let handle = handle.clone();
async move {
Ok::<_, RequestError>(service::service_fn(move |incoming: Request<Body>| {
#[cfg(feature = "expose-metrics")]
{
let uri = incoming.uri();
if uri.path() == "/metrics" {
handle_metrics(handle.clone())
} else {
Box::pin(handle_request(clients.clone(), incoming))
}
}
#[cfg(not(feature = "expose-metrics"))]
{
handle_request(clients.clone(), incoming)
}
}))
}
});
let server = Server::bind(&address).serve(service);
info!("Listening on http://{}", address);
if let Err(why) = server.await {
error!("Fatal server error: {}", why);
}
Ok(())
}
fn path_name(path: &Path) -> &'static str {
match path {
Path::ChannelsId(..) => "Channel",
Path::ChannelsIdInvites(..) => "Channel invite",
Path::ChannelsIdMessages(..) => "Channel message",
Path::ChannelsIdMessagesBulkDelete(..) => "Bulk delete message",
Path::ChannelsIdMessagesId(..) => "Channel message",
Path::ChannelsIdMessagesIdReactions(..) => "Message reaction",
Path::ChannelsIdMessagesIdReactionsUserIdType(..) => "Message reaction for user",
Path::ChannelsIdPermissionsOverwriteId(..) => "Channel permission override",
Path::ChannelsIdPins(..) => "Channel pins",
Path::ChannelsIdPinsMessageId(..) => "Specific channel pin",
Path::ChannelsIdTyping(..) => "Typing indicator",
Path::ChannelsIdWebhooks(..) => "Webhook",
Path::Gateway => "Gateway",
Path::GatewayBot => "Gateway bot info",
Path::Guilds => "Guilds",
Path::GuildsId(..) => "Guild",
Path::GuildsIdBans(..) => "Guild bans",
Path::GuildsIdAuditLogs(..) => "Guild audit logs",
Path::GuildsIdBansUserId(..) => "Guild ban for user",
Path::GuildsIdChannels(..) => "Guild channel",
Path::GuildsIdWidget(..) => "Guild widget",
Path::GuildsIdEmojis(..) => "Guild emoji",
Path::GuildsIdEmojisId(..) => "Specific guild emoji",
Path::GuildsIdIntegrations(..) => "Guild integrations",
Path::GuildsIdIntegrationsId(..) => "Specific guild integration",
Path::GuildsIdIntegrationsIdSync(..) => "Sync guild integration",
Path::GuildsIdInvites(..) => "Guild invites",
Path::GuildsIdMembers(..) => "Guild members",
Path::GuildsIdMembersId(..) => "Specific guild member",
Path::GuildsIdMembersIdRolesId(..) => "Guild member role",
Path::GuildsIdMembersMeNick(..) => "Modify own nickname",
Path::GuildsIdPreview(..) => "Guild preview",
Path::GuildsIdPrune(..) => "Guild prune",
Path::GuildsIdRegions(..) => "Guild region",
Path::GuildsIdRoles(..) => "Guild roles",
Path::GuildsIdRolesId(..) => "Specific guild role",
Path::GuildsIdVanityUrl(..) => "Guild vanity invite",
Path::GuildsIdWebhooks(..) => "Guild webhooks",
Path::InvitesCode => "Invite info",
Path::UsersId => "User info",
Path::UsersIdConnections => "User connections",
Path::UsersIdChannels => "User channels",
Path::UsersIdGuilds => "User in guild",
Path::UsersIdGuildsId => "Guild from user",
Path::VoiceRegions => "Voice region list",
Path::WebhooksId(..) => "Webhook",
Path::OauthApplicationsMe => "Current application info",
Path::ChannelsIdMessagesIdCrosspost(..) => "Crosspost message",
Path::ChannelsIdRecipients(..) => "Channel recipients",
Path::ChannelsIdFollowers(..) => "Channel followers",
Path::GuildsIdBansId(..) => "Specific guild ban",
Path::GuildsIdMembersSearch(..) => "Search guild members",
Path::GuildsIdTemplates(..) => "Guild templates",
Path::GuildsIdTemplatesCode(..) => "Specific guild template",
Path::GuildsIdVoiceStates(..) => "Guild voice states",
Path::GuildsIdWelcomeScreen(..) => "Guild welcome screen",
Path::WebhooksIdTokenMessagesId(..) => "Specific webhook message",
_ => "Unknown path!",
}
}
async fn handle_request(
clients: ClientMap,
request: Request<Body>,
) -> Result<Response<Body>, RequestError> {
let api_url: String = format!("/api/v{}/", API_VERSION);
debug!("Incoming request: {:?}", request);
let (parts, body) = request.into_parts();
let Parts {
method,
uri,
headers,
..
} = parts;
    let auth_token = headers
        .get(http::header::AUTHORIZATION)
        .and_then(|token| token.to_str().ok())
        .map(|token| token.to_owned());
    // Reuse the cached client for this auth token, or build and cache a new one.
    let client = if let Some(client) = clients.get(&auth_token) {
        client.clone()
    } else {
        let mut builder = Client::builder();
        if let Some(ref token) = auth_token {
            builder = builder.token(token.as_str());
        }
        let client = builder.build();
        clients.insert(auth_token, client.clone());
        client
    };
    let (method, m) = match method {
        HttpMethod::DELETE => (Method::Delete, "DELETE"),
        HttpMethod::GET => (Method::Get, "GET"),
        HttpMethod::PATCH => (Method::Patch, "PATCH"),
        HttpMethod::POST => (Method::Post, "POST"),
        HttpMethod::PUT => (Method::Put, "PUT"),
        _ => {
            error!("Unsupported HTTP method in request");
            return Err(RequestError::InvalidMethod { method });
        }
    };
let trimmed_path = if uri.path().starts_with(&api_url) {
uri.path().replace(&api_url, "")
} else {
uri.path().to_owned()
};
let path = Path::try_from((method, trimmed_path.as_ref())).context(InvalidPath)?;
let bytes = (hyper::body::to_bytes(body).await.context(ChunkingRequest)?).to_vec();
let path_and_query = match uri.path_and_query() {
Some(v) => v.as_str().replace(&api_url, "").into(),
None => {
debug!("No path in URI: {:?}", uri);
return Err(RequestError::NoPath { uri });
}
};
let body = if bytes.is_empty() { None } else { Some(bytes) };
let p = path_name(&path);
let raw_request = TwilightRequest {
body,
form: None,
headers: Some(headers),
method,
path,
path_str: path_and_query,
};
#[cfg(feature = "expose-metrics")]
let start = Instant::now();
let resp = client.raw(raw_request).await.context(RequestIssue)?;
#[cfg(feature = "expose-metrics")]
let end = Instant::now();
trace!("Response: {:?}", resp);
#[cfg(feature = "expose-metrics")]
histogram!(METRIC_KEY.as_str(), end - start, "method"=>m.to_string(), "route"=>p, "status"=>resp.status().to_string());
debug!("{} {}: {}", m, p, resp.status());
Ok(resp)
}
#[cfg(feature = "expose-metrics")]
fn handle_metrics(
handle: Arc<PrometheusHandle>,
) -> Pin<Box<dyn Future<Output = Result<Response<Body>, RequestError>> + Send>> {
Box::pin(async move {
Ok(Response::builder()
.body(Body::from(handle.render()))
.unwrap())
})
}
| 35.031469 | 123 | 0.591077 |
b9fb9a7a1a0179882465f525b9611f8133b54d54 | 16,007 | //! Exception handlers that are task-aware, and will kill a task on an exception.
#![no_std]
#![feature(abi_x86_interrupt)]
extern crate x86_64;
extern crate task;
// extern crate apic;
extern crate tlb_shootdown;
extern crate pmu_x86;
#[macro_use] extern crate log;
#[macro_use] extern crate vga_buffer; // for println_raw!()
#[macro_use] extern crate print; // for regular println!()
extern crate unwind;
extern crate debug_info;
extern crate gimli;
extern crate memory;
extern crate stack_trace;
extern crate fault_log;
use memory::{VirtualAddress, Page};
use x86_64::{
registers::{
control_regs,
msr::*,
},
structures::idt::{
LockedIdt,
ExceptionStackFrame,
PageFaultErrorCode
},
};
use fault_log::log_exception;
pub fn init(idt_ref: &'static LockedIdt) {
{
let mut idt = idt_ref.lock(); // withholds interrupts
// SET UP FIXED EXCEPTION HANDLERS
idt.divide_by_zero.set_handler_fn(divide_by_zero_handler);
idt.debug.set_handler_fn(debug_handler);
idt.non_maskable_interrupt.set_handler_fn(nmi_handler);
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.overflow.set_handler_fn(overflow_handler);
idt.bound_range_exceeded.set_handler_fn(bound_range_exceeded_handler);
idt.invalid_opcode.set_handler_fn(invalid_opcode_handler);
idt.device_not_available.set_handler_fn(device_not_available_handler);
idt.double_fault.set_handler_fn(double_fault_handler);
// reserved: 0x09 coprocessor segment overrun exception
idt.invalid_tss.set_handler_fn(invalid_tss_handler);
idt.segment_not_present.set_handler_fn(segment_not_present_handler);
// missing: 0x0c stack segment exception
idt.general_protection_fault.set_handler_fn(general_protection_fault_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
// reserved: 0x0f vector 15
// missing: 0x10 floating point exception
// missing: 0x11 alignment check exception
// missing: 0x12 machine check exception
// missing: 0x13 SIMD floating point exception
// missing: 0x14 virtualization vector 20
// missing: 0x15 - 0x1d SIMD floating point exception
// missing: 0x1e security exception
// reserved: 0x1f
}
idt_ref.load();
}
/// calls println!() and then println_raw!()
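/// A usage sketch (the format arguments are illustrative):
/// `println_both!("EXCEPTION at {:#X}", instruction_pointer);`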
macro_rules! println_both {
($fmt:expr) => {
print_raw!(concat!($fmt, "\n"));
print!(concat!($fmt, "\n"));
};
($fmt:expr, $($arg:tt)*) => {
print_raw!(concat!($fmt, "\n"), $($arg)*);
print!(concat!($fmt, "\n"), $($arg)*);
};
}
/// Kills the current task (the one that caused an exception) by unwinding it.
///
/// # Important Note
/// Currently, unwinding a task after an exception does not fully work like it does for panicked tasks.
/// The problem is that unwinding cleanup routines (landing pads) are generated *only if* a panic can actually occur.
/// Since machine exceptions can occur anywhere at any time (beneath the language level),
/// the required landing pads may simply not exist for the frame that the exception interrupted.
///
/// Currently, what will happen is that all stack frames will be unwound properly **except**
/// for the one during which the exception actually occurred;
/// the "excepted"/interrupted frame may be cleaned up properly, but it is unlikely.
///
/// However, stack traces / backtraces work, so we are correctly traversing call stacks with exception frames.
///
#[inline(never)]
fn kill_and_halt(exception_number: u8, stack_frame: &ExceptionStackFrame, print_stack_trace: bool) {
#[cfg(all(unwind_exceptions, not(downtime_eval)))] {
println_both!("Unwinding {:?} due to exception {}.", task::get_my_current_task(), exception_number);
}
#[cfg(not(unwind_exceptions))] {
println_both!("Killing task without unwinding {:?} due to exception {}. (cfg `unwind_exceptions` is not set.)", task::get_my_current_task(), exception_number);
}
// Dump some info about the this loaded app crate
// and test out using debug info for recovery
if false {
let curr_task = task::get_my_current_task().expect("kill_and_halt: no current task");
let app_crate = {
let t = curr_task.lock();
t.app_crate.as_ref().expect("kill_and_halt: no app_crate").clone_shallow()
};
let debug_symbols_file = {
let krate = app_crate.lock_as_ref();
trace!("============== Crate {} =================", krate.crate_name);
for s in krate.sections.values() {
trace!(" {:?}", &*s);
}
krate.debug_symbols_file.clone()
};
if false {
let mut debug = debug_info::DebugSymbols::Unloaded(debug_symbols_file);
let debug_sections = debug.load(&app_crate, &curr_task.get_namespace()).unwrap();
            let instr_ptr = stack_frame.instruction_pointer.0 - 1; // the saved IP points to the *next* instruction (at least for a page fault), so step back by one
let res = debug_sections.find_subprogram_containing(VirtualAddress::new_canonical(instr_ptr));
debug!("Result of find_subprogram_containing: {:?}", res);
}
}
// print a stack trace
#[cfg(not(downtime_eval))] {
if print_stack_trace {
println_both!("------------------ Stack Trace (DWARF) ---------------------------");
let stack_trace_result = stack_trace::stack_trace(
&|stack_frame, stack_frame_iter| {
let symbol_offset = stack_frame_iter.namespace().get_section_containing_address(
VirtualAddress::new_canonical(stack_frame.call_site_address() as usize),
false
).map(|(sec, offset)| (sec.name.clone(), offset));
if let Some((symbol_name, offset)) = symbol_offset {
println_both!(" {:>#018X} in {} + {:#X}", stack_frame.call_site_address(), symbol_name, offset);
} else {
println_both!(" {:>#018X} in ??", stack_frame.call_site_address());
}
true
},
None,
);
match stack_trace_result {
Ok(()) => { println_both!(" Beginning of stack"); }
Err(e) => { println_both!(" {}", e); }
}
println_both!("---------------------- End of Stack Trace ------------------------");
}
}
let cause = task::KillReason::Exception(exception_number);
// Call this task's kill handler, if it has one.
{
let kill_handler = task::get_my_current_task().and_then(|t| t.take_kill_handler());
if let Some(ref kh_func) = kill_handler {
#[cfg(not(downtime_eval))]
debug!("Found kill handler callback to invoke in Task {:?}", task::get_my_current_task());
kh_func(&cause);
}
else {
#[cfg(not(downtime_eval))]
debug!("No kill handler callback in Task {:?}", task::get_my_current_task());
}
}
// Unwind the current task that failed due to the given exception.
// This doesn't always work perfectly, so it's disabled by default for now.
#[cfg(unwind_exceptions)] {
// skip 2 frames: `start_unwinding` and `kill_and_halt`
match unwind::start_unwinding(cause, 2) {
Ok(_) => {
println_both!("BUG: when handling exception {}, start_unwinding() returned an Ok() value, \
which is unexpected because it means no unwinding actually occurred. Task: {:?}.",
exception_number,
task::get_my_current_task()
);
}
Err(e) => {
println_both!("Task {:?} was unable to start unwinding procedure after exception {}, error: {}.",
task::get_my_current_task(), exception_number, e
);
}
}
}
#[cfg(not(unwind_exceptions))] {
let res = task::get_my_current_task().ok_or("couldn't get current task").and_then(|taskref| taskref.kill(cause));
match res {
Ok(()) => { println_both!("Task {:?} killed itself successfully", task::get_my_current_task()); }
Err(e) => { println_both!("Task {:?} was unable to kill itself. Error: {:?}", task::get_my_current_task(), e); }
}
}
// If we failed to handle the exception and unwind the task, there's not really much we can do about it,
// other than just let the thread spin endlessly (which doesn't hurt correctness but is inefficient).
// But in general, this task should have already been marked as killed and thus no longer schedulable,
// so it should not reach this point.
// Only exceptions during the early OS initialization process will get here, meaning that the OS will basically stop.
loop { }
}
/// Checks whether the given `vaddr` falls within a stack guard page, indicating stack overflow.
fn is_stack_overflow(vaddr: VirtualAddress) -> bool {
let page = Page::containing_address(vaddr);
task::get_my_current_task()
.map(|curr_task| curr_task.lock().kstack.guard_page().contains(&page))
.unwrap_or(false)
}
/// exception 0x00
pub extern "x86-interrupt" fn divide_by_zero_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: DIVIDE BY ZERO\n{:#?}\n", stack_frame);
log_exception(0x0, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x0, stack_frame, true)
}
/// exception 0x01
pub extern "x86-interrupt" fn debug_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: DEBUG at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
// don't halt here, this isn't a fatal/permanent failure, just a brief pause.
}
/// exception 0x02, also used for TLB Shootdown IPIs and sampling interrupts
extern "x86-interrupt" fn nmi_handler(stack_frame: &mut ExceptionStackFrame) {
let mut expected_nmi = false;
// sampling interrupt handler: increments a counter, records the IP for the sample, and resets the hardware counter
if rdmsr(IA32_PERF_GLOBAL_STAUS) != 0 {
if let Err(e) = pmu_x86::handle_sample(stack_frame) {
println_both!("nmi_handler::pmu_x86: sample couldn't be recorded: {:?}", e);
}
expected_nmi = true;
}
// currently we're using NMIs to send TLB shootdown IPIs
{
let pages_to_invalidate = tlb_shootdown::TLB_SHOOTDOWN_IPI_PAGES.read().clone();
if let Some(pages) = pages_to_invalidate {
// trace!("nmi_handler (AP {})", apic::get_my_apic_id());
tlb_shootdown::handle_tlb_shootdown_ipi(pages);
expected_nmi = true;
}
}
if expected_nmi {
return;
}
println_both!("\nEXCEPTION: NON-MASKABLE INTERRUPT at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
log_exception(0x2, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x2, stack_frame, true)
}
/// exception 0x03
pub extern "x86-interrupt" fn breakpoint_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: BREAKPOINT at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
// don't halt here, this isn't a fatal/permanent failure, just a brief pause.
}
/// exception 0x04
pub extern "x86-interrupt" fn overflow_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: OVERFLOW at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
log_exception(0x4, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x4, stack_frame, true)
}
/// exception 0x05
pub extern "x86-interrupt" fn bound_range_exceeded_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: BOUND RANGE EXCEEDED at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
log_exception(0x5, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x5, stack_frame, true)
}
/// exception 0x06
pub extern "x86-interrupt" fn invalid_opcode_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: INVALID OPCODE at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
log_exception(0x6, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x6, stack_frame, true)
}
/// exception 0x07
/// see this: http://wiki.osdev.org/I_Cant_Get_Interrupts_Working#I_keep_getting_an_IRQ7_for_no_apparent_reason
pub extern "x86-interrupt" fn device_not_available_handler(stack_frame: &mut ExceptionStackFrame) {
println_both!("\nEXCEPTION: DEVICE_NOT_AVAILABLE at {:#X}\n{:#?}\n",
stack_frame.instruction_pointer,
stack_frame);
log_exception(0x7, stack_frame.instruction_pointer.0, None, None);
kill_and_halt(0x7, stack_frame, true)
}
/// exception 0x08
pub extern "x86-interrupt" fn double_fault_handler(stack_frame: &mut ExceptionStackFrame, error_code: u64) {
let accessed_vaddr = control_regs::cr2();
println_both!("\nEXCEPTION: DOUBLE FAULT\n{:#?}\nTried to access {:#X}
Note: double faults in Theseus are typically caused by stack overflow, is the stack large enough?",
stack_frame, accessed_vaddr,
);
if is_stack_overflow(VirtualAddress::new_canonical(accessed_vaddr.0)) {
println_both!("--> This double fault was definitely caused by stack overflow, tried to access {:#X}.\n", accessed_vaddr);
}
log_exception(0x8, stack_frame.instruction_pointer.0, Some(error_code), None);
kill_and_halt(0x8, stack_frame, false)
}
/// exception 0x0a
pub extern "x86-interrupt" fn invalid_tss_handler(stack_frame: &mut ExceptionStackFrame, error_code: u64) {
println_both!("\nEXCEPTION: INVALID_TSS FAULT\nerror code: \
{:#b}\n{:#?}\n",
error_code,
stack_frame);
log_exception(0xA, stack_frame.instruction_pointer.0, Some(error_code), None);
kill_and_halt(0xA, stack_frame, true)
}
/// exception 0x0b
pub extern "x86-interrupt" fn segment_not_present_handler(stack_frame: &mut ExceptionStackFrame, error_code: u64) {
println_both!("\nEXCEPTION: SEGMENT_NOT_PRESENT FAULT\nerror code: \
{:#b}\n{:#?}\n",
error_code,
stack_frame);
log_exception(0xB, stack_frame.instruction_pointer.0, Some(error_code), None);
kill_and_halt(0xB, stack_frame, true)
}
/// exception 0x0d
pub extern "x86-interrupt" fn general_protection_fault_handler(stack_frame: &mut ExceptionStackFrame, error_code: u64) {
println_both!("\nEXCEPTION: GENERAL PROTECTION FAULT \nerror code: \
{:#X}\n{:#?}\n",
error_code,
stack_frame);
log_exception(0xD, stack_frame.instruction_pointer.0, Some(error_code), None);
kill_and_halt(0xD, stack_frame, true)
}
/// exception 0x0e
pub extern "x86-interrupt" fn page_fault_handler(stack_frame: &mut ExceptionStackFrame, error_code: PageFaultErrorCode) {
let accessed_vaddr = control_regs::cr2();
#[cfg(not(downtime_eval))] {
println_both!("\nEXCEPTION: PAGE FAULT while accessing {:#X}\nerror code: \
{:?}\n{:#?}",
control_regs::cr2(),
error_code,
stack_frame,
);
if is_stack_overflow(VirtualAddress::new_canonical(accessed_vaddr.0)) {
println_both!("--> Page fault was caused by stack overflow, tried to access {:#X}\n.", accessed_vaddr);
}
}
    log_exception(0xE, stack_frame.instruction_pointer.0, None, Some(control_regs::cr2().0));
kill_and_halt(0xE, stack_frame, true)
}
// exception 0x0F is reserved on x86
| 40.524051 | 167 | 0.641469 |
5bfb4d1b3b20389546be7f1fba6538b64fe5165f | 10,006 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::session::Session;
use generated_code;
use std::cell::Cell;
use std::env;
use std::path::Path;
use syntax::parse::lexer::{self, StringReader};
use syntax::parse::token::{self, Token};
use syntax::symbol::keywords;
use syntax_pos::*;
#[derive(Clone)]
pub struct SpanUtils<'a> {
pub sess: &'a Session,
// FIXME given that we clone SpanUtils all over the place, this err_count is
// probably useless and any logic relying on it is bogus.
pub err_count: Cell<isize>,
}
impl<'a> SpanUtils<'a> {
pub fn new(sess: &'a Session) -> SpanUtils<'a> {
SpanUtils {
sess,
err_count: Cell::new(0),
}
}
pub fn make_path_string(file_name: &str) -> String {
let path = Path::new(file_name);
if path.is_absolute() {
path.clone().display().to_string()
} else {
env::current_dir().unwrap().join(&path).display().to_string()
}
}
pub fn snippet(&self, span: Span) -> String {
match self.sess.codemap().span_to_snippet(span) {
Ok(s) => s,
Err(_) => String::new(),
}
}
pub fn retokenise_span(&self, span: Span) -> StringReader<'a> {
lexer::StringReader::retokenize(&self.sess.parse_sess, span)
}
// Re-parses a path and returns the span for the last identifier in the path
pub fn span_for_last_ident(&self, span: Span) -> Option<Span> {
let mut result = None;
let mut toks = self.retokenise_span(span);
let mut bracket_count = 0;
loop {
let ts = toks.real_token();
if ts.tok == token::Eof {
return result
}
if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
result = Some(ts.sp);
}
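            // `>` closes one generic angle bracket and `>>` closes two at once,
            // hence the -1 and -2 deltas below.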
bracket_count += match ts.tok {
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0,
}
}
}
// Return the span for the first identifier in the path.
pub fn span_for_first_ident(&self, span: Span) -> Option<Span> {
let mut toks = self.retokenise_span(span);
let mut bracket_count = 0;
loop {
let ts = toks.real_token();
if ts.tok == token::Eof {
return None;
}
if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) {
return Some(ts.sp);
}
bracket_count += match ts.tok {
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0,
}
}
}
// Return the span for the last ident before a `<` and outside any
// angle brackets, or the last span.
pub fn sub_span_for_type_name(&self, span: Span) -> Option<Span> {
let mut toks = self.retokenise_span(span);
let mut prev = toks.real_token();
let mut result = None;
// We keep track of the following two counts - the depth of nesting of
// angle brackets, and the depth of nesting of square brackets. For the
// angle bracket count, we only count tokens which occur outside of any
        // square brackets (i.e. bracket_count == 0). The intuition here is
// that we want to count angle brackets in the type, but not any which
// could be in expression context (because these could mean 'less than',
// etc.).
let mut angle_count = 0;
let mut bracket_count = 0;
loop {
let next = toks.real_token();
if (next.tok == token::Lt || next.tok == token::Colon) &&
angle_count == 0 &&
bracket_count == 0 &&
prev.tok.is_ident() {
result = Some(prev.sp);
}
if bracket_count == 0 {
angle_count += match prev.tok {
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shl) => 2,
token::BinOp(token::Shr) => -2,
_ => 0,
};
}
bracket_count += match prev.tok {
token::OpenDelim(token::Bracket) => 1,
token::CloseDelim(token::Bracket) => -1,
_ => 0,
};
if next.tok == token::Eof {
break;
}
prev = next;
}
if angle_count != 0 || bracket_count != 0 {
let loc = self.sess.codemap().lookup_char_pos(span.lo());
span_bug!(span,
"Mis-counted brackets when breaking path? Parsing '{}' \
in {}, line {}",
self.snippet(span),
loc.file.name,
loc.line);
}
if result.is_none() && prev.tok.is_ident() && angle_count == 0 {
return Some(prev.sp);
}
result
}
pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option<Span> {
let mut toks = self.retokenise_span(span);
let mut prev = toks.real_token();
loop {
if prev.tok == token::Eof {
return None;
}
let next = toks.real_token();
if next.tok == tok {
return Some(prev.sp);
}
prev = next;
}
}
pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option<Span> {
let mut toks = self.retokenise_span(span);
loop {
let next = toks.real_token();
if next.tok == token::Eof {
return None;
}
if next.tok == tok {
return Some(next.sp);
}
}
}
pub fn sub_span_after_keyword(&self, span: Span, keyword: keywords::Keyword) -> Option<Span> {
self.sub_span_after(span, |t| t.is_keyword(keyword))
}
pub fn sub_span_after_token(&self, span: Span, tok: Token) -> Option<Span> {
self.sub_span_after(span, |t| t == tok)
}
fn sub_span_after<F: Fn(Token) -> bool>(&self, span: Span, f: F) -> Option<Span> {
let mut toks = self.retokenise_span(span);
loop {
let ts = toks.real_token();
if ts.tok == token::Eof {
return None;
}
if f(ts.tok) {
let ts = toks.real_token();
if ts.tok == token::Eof {
return None
} else {
return Some(ts.sp);
}
}
}
}
// // Return the name for a macro definition (identifier after first `!`)
// pub fn span_for_macro_def_name(&self, span: Span) -> Option<Span> {
// let mut toks = self.retokenise_span(span);
// loop {
// let ts = toks.real_token();
// if ts.tok == token::Eof {
// return None;
// }
// if ts.tok == token::Not {
// let ts = toks.real_token();
// if ts.tok.is_ident() {
// return Some(ts.sp);
// } else {
// return None;
// }
// }
// }
// }
// // Return the name for a macro use (identifier before first `!`).
// pub fn span_for_macro_use_name(&self, span:Span) -> Option<Span> {
// let mut toks = self.retokenise_span(span);
// let mut prev = toks.real_token();
// loop {
// if prev.tok == token::Eof {
// return None;
// }
// let ts = toks.real_token();
// if ts.tok == token::Not {
// if prev.tok.is_ident() {
// return Some(prev.sp);
// } else {
// return None;
// }
// }
// prev = ts;
// }
// }
/// Return true if the span is generated code, and
/// it is not a subspan of the root callsite.
///
/// Used to filter out spans of minimal value,
/// such as references to macro internal variables.
pub fn filter_generated(&self, sub_span: Option<Span>, parent: Span) -> bool {
if !generated_code(parent) {
if sub_span.is_none() {
// Edge case - this occurs on generated code with incorrect expansion info.
return true;
}
return false;
}
// If sub_span is none, filter out generated code.
let sub_span = match sub_span {
Some(ss) => ss,
None => return true,
};
        // If the span comes from a fake filemap, filter it.
if !self.sess.codemap().lookup_char_pos(parent.lo()).file.is_real_file() {
return true;
}
// Otherwise, a generated span is deemed invalid if it is not a sub-span of the root
// callsite. This filters out macro internal variables and most malformed spans.
!parent.source_callsite().contains(sub_span)
}
}
macro_rules! filter {
($util: expr, $span: expr, $parent: expr, None) => {
if $util.filter_generated($span, $parent) {
return None;
}
};
($util: expr, $span: ident, $parent: expr) => {
if $util.filter_generated($span, $parent) {
return;
}
};
}
| 33.023102 | 100 | 0.501299 |
c13b84e7beb42f3a4e33d4ba68fa02004b9bdd75 | 5,724 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
mod test;
use {
anyhow::*,
fidl::endpoints::{create_proxy, ServerEnd},
fidl_fuchsia_component_runner as fcrunner, fidl_fuchsia_io as fio, fidl_fuchsia_test as ftest,
fuchsia_async as fasync,
fuchsia_component::server::ServiceFs,
fuchsia_zircon as zx,
futures::prelude::*,
log::{error, info},
test::StressTest,
};
#[fuchsia::main]
async fn main() -> Result<()> {
let mut fs = ServiceFs::new();
fs.dir("svc").add_fidl_service(move |stream| {
fasync::Task::spawn(serve_runner(stream).map(|r| info!("Serving runner: {:?}", r)))
.detach();
});
fs.take_and_serve_directory_handle()?;
fs.collect::<()>().await;
Ok(())
}
async fn serve_runner(mut stream: fcrunner::ComponentRunnerRequestStream) -> Result<()> {
while let Some(fcrunner::ComponentRunnerRequest::Start { start_info, controller, .. }) =
stream.try_next().await?
{
let controller_stream = controller.into_stream()?;
fasync::Task::spawn(
serve_controller(controller_stream).map(|r| info!("Serving controller: {:?}", r)),
)
.detach();
let dictionary = start_info.program.ok_or(anyhow!("No program dictionary"))?;
let dictionary = dictionary.entries.ok_or(anyhow!("No entries in program dictionary"))?;
let namespace = start_info.ns.ok_or(anyhow!("No incoming namespace"))?;
let test = StressTest::new(dictionary, namespace)?;
let out_dir = start_info.outgoing_dir.ok_or(anyhow!("No outgoing directory"))?;
fasync::Task::spawn(
serve_out_dir(out_dir, test).map(|r| info!("Serving out dir: {:?}", r)),
)
.detach();
}
Ok(())
}
/// TODO(xbhatnag): All futures should be aborted when a Stop/Kill request is received.
async fn serve_controller(mut stream: fcrunner::ComponentControllerRequestStream) -> Result<()> {
if let Some(request) = stream.try_next().await? {
info!("Received controller request: {:?}", request);
}
Ok(())
}
async fn serve_out_dir(out_dir: ServerEnd<fio::DirectoryMarker>, test: StressTest) -> Result<()> {
let mut fs = ServiceFs::new();
fs.dir("svc").add_fidl_service(move |stream| {
fasync::Task::spawn(
serve_test_suite(stream, test.clone()).map(|r| info!("Serving test suite: {:?}", r)),
)
.detach();
});
fs.serve_connection(out_dir.into_channel())?;
fs.collect::<()>().await;
Ok(())
}
/// Implements `fuchsia.test.Suite` service and runs test.
async fn serve_test_suite(mut stream: ftest::SuiteRequestStream, test: StressTest) -> Result<()> {
while let Some(event) = stream.try_next().await? {
match event {
ftest::SuiteRequest::GetTests { iterator, control_handle: _ } => {
fasync::Task::spawn(
serve_case_iterator(iterator).map(|r| info!("Serving case iterator: {:?}", r)),
)
.detach();
}
ftest::SuiteRequest::Run { tests, listener, .. } => {
let listener = listener.into_proxy()?;
for invocation in tests {
let (case_listener, server_end) = create_proxy::<ftest::CaseListenerMarker>()?;
// TODO(84852): Use stderr to print status of actors
let (stderr_client, stderr_server) =
zx::Socket::create(zx::SocketOpts::DATAGRAM)?;
let std_handles = ftest::StdHandles {
out: None,
err: Some(stderr_server),
..ftest::StdHandles::EMPTY
};
listener.on_test_case_started(invocation, std_handles, server_end)?;
let result = if let Err(e) = test.clone().start().await {
if let Err(status) = stderr_client.write(e.to_string().as_bytes()) {
error!(
"Failed to write error to stderr socket [write status={}][error={:?}]",
status, e
)
}
ftest::Result_ {
status: Some(ftest::Status::Failed),
..ftest::Result_::EMPTY
}
} else {
ftest::Result_ {
status: Some(ftest::Status::Passed),
..ftest::Result_::EMPTY
}
};
case_listener.finished(result)?;
}
listener.on_finished()?;
}
}
}
Ok(())
}
async fn serve_case_iterator(iterator: ServerEnd<ftest::CaseIteratorMarker>) -> Result<()> {
let mut stream = iterator.into_stream()?;
let cases = vec![ftest::Case {
name: Some("stress_test".to_string()),
enabled: Some(true),
..ftest::Case::EMPTY
}];
let mut iter = cases.into_iter();
// Send the `stress_test` case in the first call
if let Some(ftest::CaseIteratorRequest::GetNext { responder }) = stream.try_next().await? {
responder.send(&mut iter)?;
}
// Send an empty response in the second call. This instructs the test_manager that there are
// no more cases in this test suite.
if let Some(ftest::CaseIteratorRequest::GetNext { responder }) = stream.try_next().await? {
responder.send(&mut vec![].into_iter())?;
}
Ok(())
}
| 38.416107 | 103 | 0.554158 |
c1e4815e535499d8a9b866c44b7c7d77e4d71d5c | 14,926 | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{snippet::Style, Applicability, CodeSuggestion, Level, Substitution, SubstitutionPart};
use crate::syntax_pos::{MultiSpan, Span};
use std::fmt;
#[must_use]
#[derive(Clone, Debug, PartialEq, Hash)]
#[cfg_attr(
feature = "diagnostic-serde",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct Diagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
pub code: Option<DiagnosticId>,
pub span: MultiSpan,
pub children: Vec<SubDiagnostic>,
pub suggestions: Vec<CodeSuggestion>,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(
feature = "diagnostic-serde",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum DiagnosticId {
Error(String),
Lint(String),
}
/// For example a note attached to an error.
#[derive(Clone, Debug, PartialEq, Hash)]
#[cfg_attr(
feature = "diagnostic-serde",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct SubDiagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
pub span: MultiSpan,
pub render_span: Option<MultiSpan>,
}
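/// A diagnostic message assembled from normal and highlighted parts.
///
/// A minimal usage sketch (the strings are illustrative):
///
/// ```ignore
/// let mut msg = DiagnosticStyledString::new();
/// msg.push_normal("expected ");
/// msg.push_highlighted("u32");
/// assert_eq!(msg.content(), "expected u32");
/// ```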
#[derive(PartialEq, Eq, Default)]
pub struct DiagnosticStyledString(pub Vec<StringPart>);
impl DiagnosticStyledString {
pub fn new() -> DiagnosticStyledString {
Default::default()
}
pub fn push_normal<S: Into<String>>(&mut self, t: S) {
self.0.push(StringPart::Normal(t.into()));
}
pub fn push_highlighted<S: Into<String>>(&mut self, t: S) {
self.0.push(StringPart::Highlighted(t.into()));
}
pub fn normal<S: Into<String>>(t: S) -> DiagnosticStyledString {
DiagnosticStyledString(vec![StringPart::Normal(t.into())])
}
pub fn highlighted<S: Into<String>>(t: S) -> DiagnosticStyledString {
DiagnosticStyledString(vec![StringPart::Highlighted(t.into())])
}
pub fn content(&self) -> String {
self.0.iter().map(|x| x.content()).collect::<String>()
}
}
#[derive(PartialEq, Eq)]
pub enum StringPart {
Normal(String),
Highlighted(String),
}
impl StringPart {
pub fn content(&self) -> &str {
match self {
&StringPart::Normal(ref s) | &StringPart::Highlighted(ref s) => s,
}
}
}
impl Diagnostic {
pub fn new(level: Level, message: &str) -> Self {
Diagnostic::new_with_code(level, None, message)
}
pub fn new_with_code(level: Level, code: Option<DiagnosticId>, message: &str) -> Self {
Diagnostic {
level,
message: vec![(message.to_owned(), Style::NoStyle)],
code,
span: MultiSpan::new(),
children: vec![],
suggestions: vec![],
}
}
pub fn is_error(&self) -> bool {
match self.level {
Level::Bug | Level::Fatal | Level::PhaseFatal | Level::Error | Level::FailureNote => {
true
}
Level::Warning | Level::Note | Level::Help | Level::Cancelled => false,
}
}
/// Cancel the diagnostic (a structured diagnostic must either be emitted or
/// canceled or it will panic when dropped).
pub fn cancel(&mut self) {
self.level = Level::Cancelled;
}
pub fn cancelled(&self) -> bool {
self.level == Level::Cancelled
}
/// Add a span/label to be included in the resulting snippet.
/// This is pushed onto the `MultiSpan` that was created when the
/// diagnostic was first built. If you don't call this function at
/// all, and you just supplied a `Span` to create the diagnostic,
/// then the snippet will just include that `Span`, which is
/// called the primary span.
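    ///
    /// A hypothetical call site (the builder, span, and label are illustrative):
    ///
    /// ```ignore
    /// diagnostic.span_label(sp, "expected `u32` here");
    /// ```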
pub fn span_label<T: Into<String>>(&mut self, span: Span, label: T) -> &mut Self {
self.span.push_span_label(span, label.into());
self
}
pub fn replace_span_with(&mut self, after: Span) -> &mut Self {
let before = self.span.clone();
self.set_span(after);
for span_label in before.span_labels() {
if let Some(label) = span_label.label {
self.span_label(after, label);
}
}
self
}
pub fn note_expected_found(
&mut self,
label: &dyn fmt::Display,
expected: DiagnosticStyledString,
found: DiagnosticStyledString,
) -> &mut Self {
self.note_expected_found_extra(label, expected, found, &"", &"")
}
pub fn note_expected_found_extra(
&mut self,
label: &dyn fmt::Display,
expected: DiagnosticStyledString,
found: DiagnosticStyledString,
expected_extra: &dyn fmt::Display,
found_extra: &dyn fmt::Display,
) -> &mut Self {
let mut msg: Vec<_> = vec![(format!("expected {} `", label), Style::NoStyle)];
msg.extend(expected.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
msg.push((format!("`{}\n", expected_extra), Style::NoStyle));
msg.push((format!(" found {} `", label), Style::NoStyle));
msg.extend(found.0.iter().map(|x| match *x {
StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
}));
msg.push((format!("`{}", found_extra), Style::NoStyle));
// For now, just attach these as notes
self.highlighted_note(msg);
self
}
pub fn note_trait_signature(&mut self, name: String, signature: String) -> &mut Self {
self.highlighted_note(vec![
(format!("`{}` from trait: `", name), Style::NoStyle),
(signature, Style::Highlight),
("`".to_string(), Style::NoStyle),
]);
self
}
pub fn note(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Note, msg, MultiSpan::new(), None);
self
}
pub fn highlighted_note(&mut self, msg: Vec<(String, Style)>) -> &mut Self {
self.sub_with_highlights(Level::Note, msg, MultiSpan::new(), None);
self
}
pub fn span_note<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Note, msg, sp.into(), None);
self
}
pub fn warn(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Warning, msg, MultiSpan::new(), None);
self
}
pub fn span_warn<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Warning, msg, sp.into(), None);
self
}
pub fn help(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Help, msg, MultiSpan::new(), None);
self
}
pub fn span_help<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self.sub(Level::Help, msg, sp.into(), None);
self
}
/// Prints out a message with a suggested edit of the code. If the
/// suggestion is presented inline it will only show the text message
/// and not the text.
///
/// See `CodeSuggestion` for more information.
#[deprecated(note = "Use `span_suggestion_short_with_applicability`")]
pub fn span_suggestion_short(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart {
snippet: suggestion,
span: sp,
}],
}],
msg: msg.to_owned(),
show_code_when_inline: false,
applicability: Applicability::Unspecified,
});
self
}
/// Prints out a message with a suggested edit of the code.
///
/// In case of short messages and a simple suggestion,
/// rustc displays it as a label like
///
/// "try adding parentheses: `(tup.0).1`"
///
/// The message
///
/// * should not end in any punctuation (a `:` is added automatically)
/// * should not be a question
/// * should not contain any parts like "the following", "as shown"
/// * may look like "to do xyz, use" or "to do xyz, use abc"
/// * may contain a name of a function, variable or type, but not whole
/// expressions
///
/// See `CodeSuggestion` for more information.
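    ///
    /// A hedged sketch of a conforming call (the names are illustrative):
    ///
    /// ```ignore
    /// diag.span_suggestion(sp, "try adding parentheses", "(tup.0).1".to_string());
    /// ```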
#[deprecated(note = "Use `span_suggestion_with_applicability`")]
pub fn span_suggestion(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart {
snippet: suggestion,
span: sp,
}],
}],
msg: msg.to_owned(),
show_code_when_inline: true,
applicability: Applicability::Unspecified,
});
self
}
pub fn multipart_suggestion_with_applicability(
&mut self,
msg: &str,
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
.into_iter()
.map(|(span, snippet)| SubstitutionPart { snippet, span })
.collect(),
}],
msg: msg.to_owned(),
show_code_when_inline: true,
applicability,
});
self
}
#[deprecated(note = "Use `multipart_suggestion_with_applicability`")]
pub fn multipart_suggestion(
&mut self,
msg: &str,
suggestion: Vec<(Span, String)>,
) -> &mut Self {
self.multipart_suggestion_with_applicability(msg, suggestion, Applicability::Unspecified)
}
/// Prints out a message with multiple suggested edits of the code.
#[deprecated(note = "Use `span_suggestions_with_applicability`")]
pub fn span_suggestions(&mut self, sp: Span, msg: &str, suggestions: Vec<String>) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: suggestions
.into_iter()
.map(|snippet| Substitution {
parts: vec![SubstitutionPart { snippet, span: sp }],
})
.collect(),
msg: msg.to_owned(),
show_code_when_inline: true,
applicability: Applicability::Unspecified,
});
self
}
/// This is a suggestion that may contain mistakes or fillers and should
/// be read and understood by a human.
pub fn span_suggestion_with_applicability(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart {
snippet: suggestion,
span: sp,
}],
}],
msg: msg.to_owned(),
show_code_when_inline: true,
applicability,
});
self
}
pub fn span_suggestions_with_applicability(
&mut self,
sp: Span,
msg: &str,
suggestions: impl Iterator<Item = String>,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: suggestions
.map(|snippet| Substitution {
parts: vec![SubstitutionPart { snippet, span: sp }],
})
.collect(),
msg: msg.to_owned(),
show_code_when_inline: true,
applicability,
});
self
}
pub fn span_suggestion_short_with_applicability(
&mut self,
sp: Span,
msg: &str,
suggestion: String,
applicability: Applicability,
) -> &mut Self {
self.suggestions.push(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart {
snippet: suggestion,
span: sp,
}],
}],
msg: msg.to_owned(),
show_code_when_inline: false,
applicability,
});
self
}
pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self {
self.span = sp.into();
self
}
pub fn code(&mut self, s: DiagnosticId) -> &mut Self {
self.code = Some(s);
self
}
pub fn get_code(&self) -> Option<DiagnosticId> {
self.code.clone()
}
pub fn message(&self) -> String {
self.message
.iter()
.map(|i| i.0.as_str())
.collect::<String>()
}
pub fn styled_message(&self) -> &Vec<(String, Style)> {
&self.message
}
/// Used by a lint. Copies over all details *but* the "main
/// message".
pub fn copy_details_not_message(&mut self, from: &Diagnostic) {
self.span = from.span.clone();
self.code = from.code.clone();
self.children.extend(from.children.iter().cloned())
}
/// Convenience function for internal use, clients should use one of the
/// public methods above.
pub fn sub(
&mut self,
level: Level,
message: &str,
span: MultiSpan,
render_span: Option<MultiSpan>,
) {
let sub = SubDiagnostic {
level,
message: vec![(message.to_owned(), Style::NoStyle)],
span,
render_span,
};
self.children.push(sub);
}
/// Convenience function for internal use, clients should use one of the
/// public methods above.
fn sub_with_highlights(
&mut self,
level: Level,
message: Vec<(String, Style)>,
span: MultiSpan,
render_span: Option<MultiSpan>,
) {
let sub = SubDiagnostic {
level,
message,
span,
render_span,
};
self.children.push(sub);
}
}
impl SubDiagnostic {
pub fn message(&self) -> String {
self.message
.iter()
.map(|i| i.0.as_str())
.collect::<String>()
}
pub fn styled_message(&self) -> &Vec<(String, Style)> {
&self.message
}
}
| 31.357143 | 100 | 0.565791 |
216ec09f2080d597c3c2a13e23470b40084c9274 | 2,512 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::S3NDTR {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct NDTR {
bits: u16,
}
impl NDTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _NDTW<'a> {
w: &'a mut W,
}
impl<'a> _NDTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Number of data items to transfer"]
#[inline]
pub fn ndt(&self) -> NDTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
NDTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - Number of data items to transfer"]
#[inline]
pub fn ndt(&mut self) -> _NDTW {
_NDTW { w: self }
}
}
| 23.698113 | 59 | 0.494029 |
50b0e939311ad4c0e1e3b4c6fa2fe7d099c70351 | 694 | //! A non-Arc property cannot be injected
use shaku::{Component, Interface, Provider};
trait DependencyTrait: Interface {}
trait ComponentTrait: Interface {}
trait ProviderTrait {}
#[derive(Component)]
#[shaku(interface = DependencyTrait)]
struct DependencyImpl;
impl DependencyTrait for DependencyImpl {}
#[derive(Component)]
#[shaku(interface = ComponentTrait)]
struct ComponentImpl {
#[shaku(inject)]
dependency: Box<dyn DependencyTrait>,
}
impl ComponentTrait for ComponentImpl {}
#[derive(Provider)]
#[shaku(interface = ProviderTrait)]
struct ProviderImpl {
#[shaku(inject)]
dependency: Box<dyn DependencyTrait>,
}
impl ProviderTrait for ProviderImpl {}
fn main() {}
| 22.387097 | 44 | 0.744957 |
03c731a20e9d00275f124c737e67fa63de7e64dd | 7,286 | ///////////////////////////////////////////////////////////////////////////////
//
// Copyright 2018-2021 Robonomics Network <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
//! Polkadot collator service implementation.
use super::{new_partial, Executor, RuntimeApi};
use cumulus_client_consensus_relay_chain::{
build_relay_chain_consensus, BuildRelayChainConsensusParams,
};
use cumulus_client_network::build_block_announce_validator;
use cumulus_client_service::{
prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
};
use cumulus_primitives_parachain_inherent::ParachainInherentData;
use robonomics_primitives::Block;
use sc_service::{Configuration, Role, TFullClient, TaskManager};
use std::sync::Arc;
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstracted over the executor and the runtime API.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl(
parachain_config: Configuration,
polkadot_config: Configuration,
id: polkadot_primitives::v0::Id,
validator_account: Option<sp_core::H160>,
) -> sc_service::error::Result<(TaskManager, Arc<TFullClient<Block, RuntimeApi, Executor>>)> {
if matches!(parachain_config.role, Role::Light) {
return Err("Light client not supported!".into());
}
let parachain_config = prepare_node_config(parachain_config);
    let params = new_partial(&parachain_config)?;
let (mut telemetry, telemetry_worker_handle) = params.other;
let relay_chain_full_node =
cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle)
.map_err(|e| match e {
polkadot_service::Error::Sub(x) => x,
s => format!("{}", s).into(),
})?;
let client = params.client.clone();
let backend = params.backend.clone();
let block_announce_validator = build_block_announce_validator(
relay_chain_full_node.client.clone(),
id,
Box::new(relay_chain_full_node.network.clone()),
relay_chain_full_node.backend.clone(),
);
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let transaction_pool = params.transaction_pool.clone();
let mut task_manager = params.task_manager;
let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
let (network, system_rpc_tx, start_network) =
sc_service::build_network(sc_service::BuildNetworkParams {
            config: &parachain_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue: import_queue.clone(),
on_demand: None,
block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
})?;
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
on_demand: None,
remote_blockchain: None,
rpc_extensions_builder: Box::new(|_, _| ()),
client: client.clone(),
transaction_pool: transaction_pool.clone(),
task_manager: &mut task_manager,
config: parachain_config,
keystore: params.keystore_container.sync_keystore(),
backend: backend.clone(),
network: network.clone(),
system_rpc_tx,
telemetry: telemetry.as_mut(),
})?;
let announce_block = {
let network = network.clone();
Arc::new(move |hash, data| network.announce_block(hash, data))
};
if let Some(account) = validator_account {
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
client.clone(),
transaction_pool,
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);
let relay_chain_backend = relay_chain_full_node.backend.clone();
let relay_chain_client = relay_chain_full_node.client.clone();
let parachain_consensus = build_relay_chain_consensus(BuildRelayChainConsensusParams {
para_id: id,
proposer_factory,
block_import: client.clone(),
relay_chain_client: relay_chain_full_node.client.clone(),
relay_chain_backend: relay_chain_full_node.backend.clone(),
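            // Assembles the per-block inherents: a timestamp, the collator's
            // lighthouse account, and the parachain inherent derived from the
            // relay chain state.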
create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
let parachain_inherent = ParachainInherentData::create_at_with_client(
relay_parent,
&relay_chain_client,
&*relay_chain_backend,
&validation_data,
id,
);
async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let lighthouse = pallet_robonomics_lighthouse::InherentDataProvider(Vec::from(
account.as_ref(),
));
let parachain = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(
"Failed to create parachain inherent",
)
})?;
Ok((timestamp, lighthouse, parachain))
}
},
});
let spawner = task_manager.spawn_handle();
let params = StartCollatorParams {
para_id: id,
block_status: client.clone(),
import_queue,
announce_block,
client: client.clone(),
task_manager: &mut task_manager,
relay_chain_full_node,
spawner,
parachain_consensus,
};
start_collator(params).await?;
} else {
let params = StartFullNodeParams {
client: client.clone(),
announce_block,
task_manager: &mut task_manager,
para_id: id,
relay_chain_full_node,
};
start_full_node(params)?;
}
start_network.start_network();
Ok((task_manager, client))
}
/// Start a normal parachain node.
pub async fn start_node(
parachain_config: Configuration,
polkadot_config: Configuration,
id: polkadot_primitives::v0::Id,
validator_account: Option<sp_core::H160>,
) -> sc_service::error::Result<(TaskManager, Arc<TFullClient<Block, RuntimeApi, Executor>>)> {
start_node_impl(parachain_config, polkadot_config, id, validator_account).await
}
| 40.032967 | 99 | 0.636563 |
deed3ad533e8ecd90d0d2429dd071c9afd60da5a | 5,842 | use anyhow::{bail, Context, Result};
use std::{collections::HashMap, path::Path};
use super::controller::Controller;
use crate::cgroups::{
common,
stats::{parse_single_value, supported_page_sizes, HugeTlbStats, StatsProvider},
};
use oci_spec::{LinuxHugepageLimit, LinuxResources};
pub struct HugeTlb {}
impl Controller for HugeTlb {
fn apply(linux_resources: &LinuxResources, cgroup_root: &std::path::Path) -> Result<()> {
log::debug!("Apply hugetlb cgroup v2 config");
if let Some(hugepage_limits) = Self::needs_to_handle(linux_resources) {
for hugetlb in hugepage_limits {
Self::apply(cgroup_root, hugetlb)?
}
}
Ok(())
}
}
impl StatsProvider for HugeTlb {
type Stats = HashMap<String, HugeTlbStats>;
fn stats(cgroup_path: &Path) -> Result<Self::Stats> {
let page_sizes = supported_page_sizes()?;
let mut hugetlb_stats = HashMap::with_capacity(page_sizes.len());
for page_size in page_sizes {
hugetlb_stats.insert(
page_size.clone(),
Self::stats_for_page_size(cgroup_path, &page_size)?,
);
}
Ok(hugetlb_stats)
}
}
impl HugeTlb {
fn apply(root_path: &Path, hugetlb: &LinuxHugepageLimit) -> Result<()> {
let page_size: String = hugetlb
.page_size
.chars()
.take_while(|c| c.is_digit(10))
.collect();
let page_size: u64 = page_size.parse()?;
if !Self::is_power_of_two(page_size) {
bail!("page size must be in the format of 2^(integer)");
}
common::write_cgroup_file(
root_path.join(format!("hugetlb.{}.limit_in_bytes", hugetlb.page_size)),
hugetlb.limit,
)?;
Ok(())
}
fn needs_to_handle(linux_resources: &LinuxResources) -> Option<&Vec<LinuxHugepageLimit>> {
if !linux_resources.hugepage_limits.is_empty() {
return Some(&linux_resources.hugepage_limits);
}
None
}
fn is_power_of_two(number: u64) -> bool {
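        // A power of two has exactly one bit set, so `n & (n - 1)` clears it
        // to zero; the `n != 0` guard excludes zero itself.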
(number != 0) && (number & (number - 1)) == 0
}
fn stats_for_page_size(cgroup_path: &Path, page_size: &str) -> Result<HugeTlbStats> {
let events_file = format!("hugetlb.{}.events", page_size);
let events = common::read_cgroup_file(cgroup_path.join(&events_file))?;
let fail_count: u64 = events
.lines()
.find(|l| l.starts_with("max"))
.map(|l| l[3..].trim().parse())
.transpose()
.with_context(|| format!("failed to parse max value for {}", events_file))?
.unwrap_or_default();
Ok(HugeTlbStats {
usage: parse_single_value(&cgroup_path.join(format!("hugetlb.{}.current", page_size)))?,
fail_count,
..Default::default()
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cgroups::test::set_fixture;
use crate::utils::create_temp_dir;
use oci_spec::LinuxHugepageLimit;
use std::fs::read_to_string;
#[test]
fn test_set_hugetlb() {
let page_file_name = "hugetlb.2MB.limit_in_bytes";
let tmp = create_temp_dir("test_set_hugetlbv2").expect("create temp directory for test");
set_fixture(&tmp, page_file_name, "0").expect("Set fixture for 2 MB page size");
let hugetlb = LinuxHugepageLimit {
page_size: "2MB".to_owned(),
limit: 16384,
};
HugeTlb::apply(&tmp, &hugetlb).expect("apply hugetlb");
let content = read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content");
assert_eq!(hugetlb.limit.to_string(), content);
}
#[test]
fn test_set_hugetlb_with_invalid_page_size() {
let tmp = create_temp_dir("test_set_hugetlbv2_with_invalid_page_size")
.expect("create temp directory for test");
let hugetlb = LinuxHugepageLimit {
page_size: "3MB".to_owned(),
limit: 16384,
};
let result = HugeTlb::apply(&tmp, &hugetlb);
assert!(
result.is_err(),
"page size that is not a power of two should be an error"
);
}
quickcheck! {
fn property_test_set_hugetlb(hugetlb: LinuxHugepageLimit) -> bool {
            let page_file_name = format!("hugetlb.{}.limit_in_bytes", hugetlb.page_size);
let tmp = create_temp_dir("property_test_set_hugetlbv2").expect("create temp directory for test");
set_fixture(&tmp, &page_file_name, "0").expect("Set fixture for page size");
let result = HugeTlb::apply(&tmp, &hugetlb);
let page_size: String = hugetlb
.page_size
.chars()
.take_while(|c| c.is_digit(10))
.collect();
let page_size: u64 = page_size.parse().expect("parse page size");
if HugeTlb::is_power_of_two(page_size) && page_size != 1 {
let content =
read_to_string(tmp.join(page_file_name)).expect("Read hugetlb file content");
hugetlb.limit.to_string() == content
} else {
result.is_err()
}
}
}
#[test]
fn test_stat_hugetbl() {
let tmp = create_temp_dir("test_stat_hugetlb").expect("create temp directory for test");
set_fixture(&tmp, "hugetlb.2MB.current", "1024\n").expect("set hugetlb current");
set_fixture(&tmp, "hugetlb.2MB.events", "max 5\n").expect("set hugetlb events");
let actual = HugeTlb::stats_for_page_size(&tmp, "2MB").expect("get cgroup stats");
let expected = HugeTlbStats {
usage: 1024,
max_usage: 0,
fail_count: 5,
};
assert_eq!(actual, expected);
}
}
| 33.768786 | 110 | 0.589695 |
7aed017d1690e1e4b455e47789d398bc855a972b | 8,752 | // SPDX-License-Identifier: Apache-2.0
//
// CREDITS
// * https://github.com/fortanix/rust-sgx for examples of AESM requests.
use crate::protobuf::aesm_proto::{
Request, Request_GetQuoteExRequest, Request_GetQuoteSizeExRequest,
Request_GetSupportedAttKeyIDNumRequest, Request_GetSupportedAttKeyIDsRequest,
Request_InitQuoteExRequest, Response,
};
use std::io::{Error, ErrorKind, Read, Write};
use std::mem::size_of;
use std::ops::{Deref, DerefMut};
use std::os::unix::net::UnixStream;
use protobuf::Message;
use sallyport::item::enarxcall::sgx::TargetInfo;
const SGX_TI_SIZE: usize = size_of::<TargetInfo>();
const AESM_SOCKET: &str = "/var/run/aesmd/aesm.socket";
const AESM_REQUEST_TIMEOUT: u32 = 1_000_000;
const SGX_KEY_ID_SIZE: u32 = 256;
const SGX_REPORT_SIZE: usize = 432;
struct AesmTransaction(Request);
impl Deref for AesmTransaction {
type Target = Request;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for AesmTransaction {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AesmTransaction {
fn new() -> Self {
Self(Request::new())
}
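    /// Sends this request to the AESM daemon and reads back its response.
    ///
    /// The wire format is a 4-byte little-endian length prefix followed by
    /// the serialized protobuf message; the response uses the same framing.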
fn request(&self) -> Result<Response, Error> {
let mut request = Vec::<u8>::new();
self.write_to_vec(&mut request).map_err(|e| {
Error::new(
ErrorKind::Other,
format!("Invalid protobuf request: {:?}. Error: {:?}", self.0, e),
)
})?;
let mut stream = UnixStream::connect(AESM_SOCKET)?;
let request_len = request.len() as u32;
// AESM daemon requires the length prepended before the request.
stream.write_all(&request_len.to_le_bytes())?;
stream.write_all(&request)?;
stream.flush()?;
let mut response_len_bytes = [0u8; 4];
stream.read_exact(&mut response_len_bytes)?;
let response_len = u32::from_le_bytes(response_len_bytes);
let mut response_bytes = vec![0; response_len as usize];
stream.read_exact(&mut response_bytes)?;
let response = Message::parse_from_bytes(&response_bytes)?;
Ok(response)
}
}
fn get_key_id_num() -> Result<u32, Error> {
let mut transaction = AesmTransaction::new();
let mut msg = Request_GetSupportedAttKeyIDNumRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
transaction.set_getSupportedAttKeyIDNumReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_getSupportedAttKeyIDNumRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::Other,
format!(
"Received error code {:?} in GetSupportedAttKeyIDNum",
res.get_errorCode()
),
));
}
Ok(res.get_att_key_id_num())
}
fn get_key_ids(num_key_ids: u32) -> Result<Vec<Vec<u8>>, Error> {
let expected_buffer_size = num_key_ids * SGX_KEY_ID_SIZE;
let mut transaction = AesmTransaction::new();
let mut msg = Request_GetSupportedAttKeyIDsRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
msg.set_buf_size(expected_buffer_size);
transaction.set_getSupportedAttKeyIDsReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_getSupportedAttKeyIDsRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("GetSupportedAttKeyIDs: error: {:?}", res.get_errorCode()),
));
}
let key_ids_blob = res.get_att_key_ids();
Ok(key_ids_blob
.chunks_exact(SGX_KEY_ID_SIZE as usize)
.map(Vec::from)
.collect())
}
/// Gets the attestation key ID, requiring that exactly one key is supported.
pub fn get_attestation_key_id() -> Result<Vec<u8>, Error> {
let num_key_ids = get_key_id_num()?;
if num_key_ids != 1 {
return Err(Error::new(
ErrorKind::Other,
format!("Unexpected number of key IDs: {} != 1", num_key_ids),
));
}
let key_ids = get_key_ids(num_key_ids)?;
if key_ids.len() != 1 {
return Err(Error::new(
ErrorKind::Other,
format!(
"GeSupportedAttKeyIDs: invalid count: {} != 1",
key_ids.len()
),
));
}
let akid = key_ids.get(0).unwrap().clone();
Ok(akid)
}
/// Fills the Target Info of the QE into the output buffer specified and
/// returns the number of bytes written.
pub fn get_target_info(akid: Vec<u8>, size: usize, out_buf: &mut [u8]) -> Result<usize, Error> {
if out_buf.len() != SGX_TI_SIZE {
return Err(Error::new(
ErrorKind::Other,
format!(
"Invalid output buffer size: {} != {}",
out_buf.len(),
SGX_TI_SIZE
),
));
}
let mut transaction = AesmTransaction::new();
let mut msg = Request_InitQuoteExRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
msg.set_b_pub_key_id(true);
msg.set_att_key_id(akid);
msg.set_buf_size(size as _);
transaction.set_initQuoteExReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_initQuoteExRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("InitQuoteExRequest: error: {:?}", res.get_errorCode()),
));
}
let ti = res.get_target_info();
if ti.len() != SGX_TI_SIZE {
return Err(Error::new(
ErrorKind::Other,
format!(
"InitQuoteEx: Invalid TARGETINFO size: {} != {}",
ti.len(),
SGX_TI_SIZE
),
));
}
out_buf.copy_from_slice(ti);
Ok(ti.len())
}
/// Gets the public key size for the given attestation key ID.
pub fn get_key_size(akid: Vec<u8>) -> Result<usize, Error> {
let mut transaction = AesmTransaction::new();
let mut msg = Request_InitQuoteExRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
msg.set_b_pub_key_id(false);
msg.set_att_key_id(akid);
transaction.set_initQuoteExReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_initQuoteExRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("InitQuoteEx error: {:?}", res.get_errorCode()),
));
}
Ok(res.get_pub_key_id_size() as usize)
}
/// Gets the quote size for the given attestation key ID.
pub fn get_quote_size(akid: Vec<u8>) -> Result<usize, Error> {
let mut transaction = AesmTransaction::new();
let mut msg = Request_GetQuoteSizeExRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
msg.set_att_key_id(akid);
transaction.set_getQuoteSizeExReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_getQuoteSizeExRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("GetQuoteSizeEx error: {:?}", res.get_errorCode()),
));
}
Ok(res.get_quote_size() as usize)
}
/// Fills the Quote obtained from the AESMD for the Report specified into
/// the output buffer specified and returns the number of bytes written.
pub fn get_quote(report: &[u8], akid: Vec<u8>, out_buf: &mut [u8]) -> Result<usize, Error> {
let mut transaction = AesmTransaction::new();
let mut msg = Request_GetQuoteExRequest::new();
msg.set_timeout(AESM_REQUEST_TIMEOUT);
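    // Only the first SGX_REPORT_SIZE bytes (the raw REPORT structure) are
    // forwarded; any trailing bytes in the caller's slice are ignored.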
msg.set_report(report[0..SGX_REPORT_SIZE].to_vec());
msg.set_att_key_id(akid);
msg.set_buf_size(out_buf.len() as u32);
transaction.set_getQuoteExReq(msg);
let pb_msg = transaction.request()?;
let res = pb_msg.get_getQuoteExRes();
if res.get_errorCode() != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("GetQuoteEx error: {:?}", res.get_errorCode()),
));
}
let quote = res.get_quote();
if quote.len() != out_buf.len() {
return Err(Error::new(
ErrorKind::InvalidData,
format!(
"GetQuoteEx: Invalid QUOTE size: {} != {}",
quote.len(),
out_buf.len()
),
));
}
out_buf.copy_from_slice(quote);
Ok(quote.len())
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg_attr(not(host_can_test_attestation), ignore)]
#[test]
fn request_target_info() {
assert_eq!(std::path::Path::new(AESM_SOCKET).exists(), true);
let mut output = [1u8; SGX_TI_SIZE];
let akid = get_attestation_key_id().expect("error obtaining attestation key id");
let pkeysize = get_key_size(akid.clone()).expect("error obtaining key size");
assert_eq!(
get_target_info(akid, pkeysize, &mut output).unwrap(),
SGX_TI_SIZE
);
}
}
| 27.696203 | 96 | 0.611289 |
edfab2abfd16dce8c2287b64ee2847b72b15067f | 3,479 | //! The `source` subcommand.
use colored::*;
use command_runner::CommandRunner;
use errors::*;
use project::Project;
/// We implement `source` with a trait so we put it in its own module.
pub trait CommandSource {
/// List all the source trees associated with a project.
fn source_list<CR>(&self, runner: &CR) -> Result<()> where CR: CommandRunner;
/// Clone the specified source tree.
fn source_clone<CR>(&self, runner: &CR, alias: &str) -> Result<()>
where CR: CommandRunner;
/// Set the `mounted` flag on the specified source tree.
fn source_set_mounted<CR>(&mut self,
runner: &CR,
alias: &str,
mounted: bool)
-> Result<()>
where CR: CommandRunner;
}
impl CommandSource for Project {
fn source_list<CR>(&self, _runner: &CR) -> Result<()>
where CR: CommandRunner
{
for source in self.sources().iter() {
println!("{:25} {}", source.alias().green(), source.context());
if source.is_available_locally(self) {
let path = try!(try!(source.path(self).canonicalize())
.strip_prefix(self.root_dir()))
.to_owned();
let mounted = if source.mounted() {
"(mounted)".normal()
} else {
"(NOT MOUNTED)".red().bold()
};
println!(" Available at {} {}", path.display(), mounted);
}
}
Ok(())
}
fn source_clone<CR>(&self, runner: &CR, alias: &str) -> Result<()>
where CR: CommandRunner
{
let source = try!(self.sources()
.find_by_alias(alias)
.ok_or_else(|| ErrorKind::UnknownSource(alias.to_owned())));
if !source.is_available_locally(self) {
try!(source.clone_source(runner, self));
} else {
println!("'{}' is already available locally", source.alias());
}
Ok(())
}
fn source_set_mounted<CR>(&mut self,
runner: &CR,
alias: &str,
mounted: bool)
-> Result<()>
where CR: CommandRunner
{
{
// Look up the source mutably. We do this in a block so we can
// drop the mutable borrow before continuing and keep Rust
// happy.
let source = try!(self.sources_mut()
.find_by_alias_mut(alias)
.ok_or_else(|| ErrorKind::UnknownSource(alias.to_owned())));
// Set the mounted flag on our source.
source.set_mounted(mounted);
}
// Write our persistent project settings back to disk.
try!(self.save_settings());
// Clone the source if we're mounting it but don't have a local
// copy yet.
let source = try!(self.sources()
.find_by_alias(alias)
.ok_or_else(|| ErrorKind::UnknownSource(alias.to_owned())));
if source.mounted() && !source.is_available_locally(self) {
try!(self.source_clone(runner, alias));
}
// Notify the user that they need to run `up`.
println!("Now run `cage up` for these changes to take effect.");
Ok(())
}
}
// No tests because this is a very thin wrapper over `Sources` and `Source`.
| 34.107843 | 81 | 0.51969 |
0ab01ab9c2a0dfa3bd531295284c78c612003914 | 1,748 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
#![allow(clippy::vec_init_then_push)]
#![allow(rustdoc::bare_urls)]
#![warn(missing_docs)]
//! <p>AWS IoT Wireless API documentation</p>
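//!
//! # Example
//!
//! A minimal sketch constructing a client from shared configuration (this
//! assumes the `aws-config` crate and a Tokio runtime are available):
//!
//! ```no_run
//! # async fn example() {
//! let shared_config = aws_config::load_from_env().await;
//! let client = aws_sdk_iotwireless::Client::new(&shared_config);
//! # }
//! ```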
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
/// Client and fluent builders for calling the service.
#[cfg(feature = "client")]
pub mod client;
/// Configuration for the service.
pub mod config;
/// Errors that can occur when calling the service.
pub mod error;
mod error_meta;
mod idempotency_token;
/// Input structures for operations.
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
/// Data structures used by operation inputs/outputs.
pub mod model;
mod no_credentials;
/// All operations that this crate can perform.
pub mod operation;
mod operation_deser;
mod operation_ser;
/// Output structures for operations.
pub mod output;
/// Crate version number.
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use aws_smithy_http::byte_stream::ByteStream;
pub use aws_smithy_http::result::SdkError;
pub use aws_smithy_types::Blob;
pub use aws_smithy_types::DateTime;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("iotwireless", PKG_VERSION);
pub use aws_smithy_http::endpoint::Endpoint;
pub use aws_smithy_types::retry::RetryConfig;
pub use aws_types::app_name::AppName;
pub use aws_types::region::Region;
pub use aws_types::Credentials;
#[cfg(feature = "client")]
pub use client::Client;
| 31.214286 | 80 | 0.769451 |
0a61fea7fa6dbc078c9ba05aa746267a7dd37583 | 24,602 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use config_builder::util::{get_test_config, get_test_config_with_validators};
use executor::{CommittableBlock, Executor};
use failure::prelude::*;
use futures::executor::block_on;
use grpc_helpers::ServerHandle;
use grpcio::EnvBuilder;
use libra_config::config::{NodeConfig, VMConfig, VMPublishingOption};
use libra_crypto::{ed25519::*, hash::GENESIS_BLOCK_ID, test_utils::TEST_SEED, HashValue};
use libra_types::block_info::BlockInfo;
use libra_types::{
access_path::AccessPath,
account_address::AccountAddress,
account_config::{association_address, get_account_resource_or_default},
account_state_blob::AccountStateWithProof,
block_metadata::BlockMetadata,
crypto_proxies::ValidatorVerifier,
get_with_proof::{verify_update_to_latest_ledger_response, RequestItem},
ledger_info::{LedgerInfo, LedgerInfoWithSignatures},
test_helpers::transaction_test_helpers::get_test_signed_txn,
transaction::{Script, Transaction, TransactionListWithProof, TransactionWithProof},
};
use rand::SeedableRng;
use std::{collections::BTreeMap, sync::Arc};
use storage_client::{StorageRead, StorageReadServiceClient, StorageWriteServiceClient};
use storage_service::start_storage_service;
use transaction_builder::{
encode_block_prologue_script, encode_create_account_script,
encode_rotate_consensus_pubkey_script, encode_transfer_script,
};
use vm_runtime::MoveVM;
fn gen_block_id(index: u8) -> HashValue {
HashValue::new([index; HashValue::LENGTH])
}
fn gen_ledger_info_with_sigs(
version: u64,
root_hash: HashValue,
commit_block_id: HashValue,
) -> LedgerInfoWithSignatures<Ed25519Signature> {
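    // These tests never verify signatures, so an empty signer map and a zeroed
    // consensus data hash are sufficient.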
let ledger_info = LedgerInfo::new(
BlockInfo::new(0, 0, commit_block_id, root_hash, version, 0, None),
HashValue::zero(),
);
LedgerInfoWithSignatures::new(ledger_info, BTreeMap::new())
}
fn gen_block_metadata(index: u8, proposer: AccountAddress) -> BlockMetadata {
BlockMetadata::new(gen_block_id(index), index as u64, BTreeMap::new(), proposer)
}
fn create_storage_service_and_executor(config: &NodeConfig) -> (ServerHandle, Executor<MoveVM>) {
let storage_server_handle = start_storage_service(config);
let client_env = Arc::new(EnvBuilder::new().build());
let storage_read_client = Arc::new(StorageReadServiceClient::new(
Arc::clone(&client_env),
&config.storage.address,
config.storage.port,
));
let storage_write_client = Arc::new(StorageWriteServiceClient::new(
Arc::clone(&client_env),
&config.storage.address,
config.storage.port,
None,
));
let executor = Executor::new(
Arc::clone(&storage_read_client) as Arc<dyn StorageRead>,
storage_write_client,
config,
);
(storage_server_handle, executor)
}
fn get_test_signed_transaction(
sender: AccountAddress,
sequence_number: u64,
private_key: Ed25519PrivateKey,
public_key: Ed25519PublicKey,
program: Option<Script>,
) -> Transaction {
Transaction::UserTransaction(get_test_signed_txn(
sender,
sequence_number,
private_key,
public_key,
program,
))
}
#[test]
fn test_reconfiguration() {
    // When a transaction's execution emits a validator set change, storage should propagate the
    // new validator set
let (validators, consensus_peers, mut config, genesis_keypair) =
get_test_config_with_validators();
config.vm_config = VMConfig {
publishing_options: VMPublishingOption::CustomScripts,
};
let (_storage_server_handle, executor) = create_storage_service_and_executor(&config);
let genesis_account = association_address();
let validator_account = validators.keys().next().unwrap();
let (validator_privkey, _) = validators.get(validator_account).unwrap();
let validator_pubkey = &consensus_peers
.peers
.get(&validator_account.to_string())
.unwrap()
.consensus_pubkey;
// give the validator some money so they can send a tx
let txn1 = get_test_signed_transaction(
genesis_account,
/* sequence_number = */ 1,
genesis_keypair.private_key.clone(),
genesis_keypair.public_key.clone(),
Some(encode_transfer_script(validator_account, 200_000)),
);
    // rotate the validator's consensus pubkey to trigger a reconfiguration
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let (_, new_pubkey) = compat::generate_keypair(&mut rng);
let txn2 = get_test_signed_transaction(
*validator_account,
/* sequence_number = */ 0,
validator_privkey.consensus_private_key.clone(),
validator_pubkey.clone(),
Some(encode_rotate_consensus_pubkey_script(
new_pubkey.to_bytes().to_vec(),
)),
);
// Create a dummy block prologue transaction that will emit a ValidatorSetChanged event
let txn3 = encode_block_prologue_script(gen_block_metadata(1, *validator_account));
let txn_block = vec![txn1, txn2, txn3];
let block1_id = gen_block_id(1);
let vm_output = block_on(executor.execute_block(
txn_block,
executor.committed_trees().clone(),
*GENESIS_BLOCK_ID,
block1_id,
))
.unwrap()
.unwrap();
// Make sure the execution result sees the reconfiguration
assert!(
vm_output.state_compute_result().has_reconfiguration(),
"StateComputeResult is missing the new validator set"
);
// rotating to the same key should not trigger a reconfiguration
let txn4 = get_test_signed_transaction(
*validator_account,
/* sequence_number = */ 1,
validator_privkey.consensus_private_key.clone(),
validator_pubkey.clone(),
Some(encode_rotate_consensus_pubkey_script(
new_pubkey.to_bytes().to_vec(),
)),
);
let txn5 = encode_block_prologue_script(gen_block_metadata(2, *validator_account));
let txn_block = vec![txn4, txn5];
let block2_id = gen_block_id(2);
let output = block_on(executor.execute_block(
txn_block,
executor.committed_trees().clone(),
block1_id,
block2_id,
))
.unwrap()
.unwrap();
assert!(
!output.state_compute_result().has_reconfiguration(),
"StateComputeResult has a new validator set, but should not"
);
// TODO: test rotating to invalid key. Currently, this crashes the executor because the
// validator set fails to parse
}
#[test]
fn test_execution_with_storage() {
let (config, genesis_keypair) = get_test_config();
let (_storage_server_handle, executor) = create_storage_service_and_executor(&config);
let storage_read_client = Arc::new(StorageReadServiceClient::new(
Arc::new(EnvBuilder::new().build()),
&config.storage.address,
config.storage.port,
));
let seed = [1u8; 32];
// TEST_SEED is also used to generate a random validator set in get_test_config. Each account
// in this random validator set gets created in genesis. If one of {account1, account2,
// account3} already exists in genesis, the code below will fail.
assert!(seed != TEST_SEED);
let mut rng = ::rand::rngs::StdRng::from_seed(seed);
let (privkey1, pubkey1) = compat::generate_keypair(&mut rng);
let account1 = AccountAddress::from_public_key(&pubkey1);
let (privkey2, pubkey2) = compat::generate_keypair(&mut rng);
let account2 = AccountAddress::from_public_key(&pubkey2);
let (_privkey3, pubkey3) = compat::generate_keypair(&mut rng);
let account3 = AccountAddress::from_public_key(&pubkey3);
let genesis_account = association_address();
// Create account1 with 2M coins.
let txn1 = get_test_signed_transaction(
genesis_account,
/* sequence_number = */ 1,
genesis_keypair.private_key.clone(),
genesis_keypair.public_key.clone(),
Some(encode_create_account_script(&account1, 2_000_000)),
);
// Create account2 with 200k coins.
let txn2 = get_test_signed_transaction(
genesis_account,
/* sequence_number = */ 2,
genesis_keypair.private_key.clone(),
genesis_keypair.public_key.clone(),
Some(encode_create_account_script(&account2, 200_000)),
);
// Create account3 with 100k coins.
let txn3 = get_test_signed_transaction(
genesis_account,
/* sequence_number = */ 3,
genesis_keypair.private_key.clone(),
genesis_keypair.public_key.clone(),
Some(encode_create_account_script(&account3, 100_000)),
);
// Transfer 20k coins from account1 to account2.
// balance: <1.98M, 220k, 100k
let txn4 = get_test_signed_transaction(
account1,
/* sequence_number = */ 0,
privkey1.clone(),
pubkey1.clone(),
Some(encode_transfer_script(&account2, 20_000)),
);
// Transfer 10k coins from account2 to account3.
// balance: <1.98M, <210k, 110k
let txn5 = get_test_signed_transaction(
account2,
/* sequence_number = */ 0,
privkey2.clone(),
pubkey2.clone(),
Some(encode_transfer_script(&account3, 10_000)),
);
// Transfer 70k coins from account1 to account3.
// balance: <1.91M, <210k, 180k
let txn6 = get_test_signed_transaction(
account1,
/* sequence_number = */ 1,
privkey1.clone(),
pubkey1.clone(),
Some(encode_transfer_script(&account3, 70_000)),
);
let block1 = vec![txn1, txn2, txn3, txn4, txn5, txn6];
let block1_id = gen_block_id(1);
let mut block2 = vec![];
let block2_id = gen_block_id(2);
// Create 14 txns transferring 10k from account1 to account3 each.
for i in 2..=15 {
block2.push(get_test_signed_transaction(
account1,
/* sequence_number = */ i,
privkey1.clone(),
pubkey1.clone(),
Some(encode_transfer_script(&account3, 10_000)),
));
}
let output1 = block_on(executor.execute_block(
block1.clone(),
executor.committed_trees().clone(),
*GENESIS_BLOCK_ID,
block1_id,
))
.unwrap()
.unwrap();
let block1_trees = output1.executed_trees().clone();
let ledger_info_with_sigs = gen_ledger_info_with_sigs(6, output1.accu_root(), block1_id);
block_on(executor.commit_blocks(
vec![CommittableBlock::new(block1.clone(), Arc::new(output1))],
ledger_info_with_sigs,
))
.unwrap()
.unwrap();
let request_items = vec![
RequestItem::GetAccountTransactionBySequenceNumber {
account: genesis_account,
sequence_number: 1,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: genesis_account,
sequence_number: 2,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: genesis_account,
sequence_number: 3,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: genesis_account,
sequence_number: 4,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: account1,
sequence_number: 0,
fetch_events: true,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: account2,
sequence_number: 0,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: account1,
sequence_number: 1,
fetch_events: false,
},
RequestItem::GetAccountState { address: account1 },
RequestItem::GetAccountState { address: account2 },
RequestItem::GetAccountState { address: account3 },
RequestItem::GetTransactions {
start_version: 3,
limit: 10,
fetch_events: false,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_sent_event(account1),
start_event_seq_num: 0,
ascending: true,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_sent_event(account2),
start_event_seq_num: 0,
ascending: true,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_sent_event(account3),
start_event_seq_num: 0,
ascending: true,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_received_event(account1),
start_event_seq_num: u64::max_value(),
ascending: false,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_received_event(account2),
start_event_seq_num: u64::max_value(),
ascending: false,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_received_event(account3),
start_event_seq_num: u64::max_value(),
ascending: false,
limit: 10,
},
];
let (
mut response_items,
ledger_info_with_sigs,
_validator_change_events,
_ledger_consistency_proof,
) = storage_read_client
.update_to_latest_ledger(/* client_known_version = */ 0, request_items.clone())
.unwrap();
verify_update_to_latest_ledger_response(
Arc::new(ValidatorVerifier::new(BTreeMap::new())),
0,
&request_items,
&response_items,
&ledger_info_with_sigs,
)
.unwrap();
response_items.reverse();
let (t1, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t1.as_ref(), &block1[0]).unwrap();
let (t2, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t2.as_ref(), &block1[1]).unwrap();
let (t3, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t3.as_ref(), &block1[2]).unwrap();
let (tn, pn) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_uncommitted_txn_status(
tn.as_ref(),
pn.as_ref(),
/* next_seq_num_of_this_account = */ 4,
)
.unwrap();
let (t4, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t4.as_ref(), &block1[3]).unwrap();
// We requested the events to come back from this one, so verify that they did
assert_eq!(t4.unwrap().events.unwrap().len(), 2);
let (t5, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t5.as_ref(), &block1[4]).unwrap();
let (t6, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t6.as_ref(), &block1[5]).unwrap();
let account1_state_with_proof = response_items
.pop()
.unwrap()
.into_get_account_state_response()
.unwrap();
verify_account_balance(&account1_state_with_proof, |x| x < 1_910_000).unwrap();
let account2_state_with_proof = response_items
.pop()
.unwrap()
.into_get_account_state_response()
.unwrap();
verify_account_balance(&account2_state_with_proof, |x| x < 210_000).unwrap();
let account3_state_with_proof = response_items
.pop()
.unwrap()
.into_get_account_state_response()
.unwrap();
verify_account_balance(&account3_state_with_proof, |x| x == 180_000).unwrap();
let transaction_list_with_proof = response_items
.pop()
.unwrap()
.into_get_transactions_response()
.unwrap();
verify_transactions(&transaction_list_with_proof, &block1[2..]).unwrap();
let (account1_sent_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account1_sent_events.len(), 2);
let (account2_sent_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account2_sent_events.len(), 1);
let (account3_sent_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account3_sent_events.len(), 0);
let (account1_received_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account1_received_events.len(), 1);
let (account2_received_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account2_received_events.len(), 2);
let (account3_received_events, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account3_received_events.len(), 3);
// Execution the 2nd block.
let output2 =
block_on(executor.execute_block(block2.clone(), block1_trees, block1_id, block2_id))
.unwrap()
.unwrap();
let ledger_info_with_sigs = gen_ledger_info_with_sigs(20, output2.accu_root(), block2_id);
block_on(executor.commit_blocks(
vec![CommittableBlock::new(block2.clone(), Arc::new(output2))],
ledger_info_with_sigs,
))
.unwrap()
.unwrap();
let request_items = vec![
RequestItem::GetAccountTransactionBySequenceNumber {
account: account1,
sequence_number: 2,
fetch_events: false,
},
RequestItem::GetAccountTransactionBySequenceNumber {
account: account1,
sequence_number: 15,
fetch_events: false,
},
RequestItem::GetAccountState { address: account1 },
RequestItem::GetAccountState { address: account3 },
RequestItem::GetTransactions {
start_version: 7,
limit: 14,
fetch_events: false,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_sent_event(account1),
start_event_seq_num: 0,
ascending: true,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_sent_event(account1),
start_event_seq_num: 10,
ascending: true,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_received_event(account3),
start_event_seq_num: u64::max_value(),
ascending: false,
limit: 10,
},
RequestItem::GetEventsByEventAccessPath {
access_path: AccessPath::new_for_received_event(account3),
start_event_seq_num: 6,
ascending: false,
limit: 10,
},
];
let (
mut response_items,
ledger_info_with_sigs,
_validator_change_events,
_ledger_consistency_proof,
) = storage_read_client
.update_to_latest_ledger(/* client_known_version = */ 0, request_items.clone())
.unwrap();
verify_update_to_latest_ledger_response(
Arc::new(ValidatorVerifier::new(BTreeMap::new())),
0,
&request_items,
&response_items,
&ledger_info_with_sigs,
)
.unwrap();
response_items.reverse();
let (t7, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t7.as_ref(), &block2[0]).unwrap();
let (t20, _) = response_items
.pop()
.unwrap()
.into_get_account_txn_by_seq_num_response()
.unwrap();
verify_committed_txn_status(t20.as_ref(), &block2[13]).unwrap();
let account1_state_with_proof = response_items
.pop()
.unwrap()
.into_get_account_state_response()
.unwrap();
verify_account_balance(&account1_state_with_proof, |x| x < 1_770_000).unwrap();
let account3_state_with_proof = response_items
.pop()
.unwrap()
.into_get_account_state_response()
.unwrap();
verify_account_balance(&account3_state_with_proof, |x| x == 320_000).unwrap();
let transaction_list_with_proof = response_items
.pop()
.unwrap()
.into_get_transactions_response()
.unwrap();
verify_transactions(&transaction_list_with_proof, &block2[..]).unwrap();
let (account1_sent_events_batch1, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account1_sent_events_batch1.len(), 10);
let (account1_sent_events_batch2, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account1_sent_events_batch2.len(), 6);
let (account3_received_events_batch1, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account3_received_events_batch1.len(), 10);
assert_eq!(
account3_received_events_batch1[0].event.sequence_number(),
16
);
let (account3_received_events_batch2, _) = response_items
.pop()
.unwrap()
.into_get_events_by_access_path_response()
.unwrap();
assert_eq!(account3_received_events_batch2.len(), 7);
assert_eq!(
account3_received_events_batch2[0].event.sequence_number(),
6
);
}
fn verify_account_balance<F>(account_state_with_proof: &AccountStateWithProof, f: F) -> Result<()>
where
F: Fn(u64) -> bool,
{
let balance = get_account_resource_or_default(&account_state_with_proof.blob)?.balance();
ensure!(
f(balance),
"balance {} doesn't satisfy the condition passed in",
balance
);
Ok(())
}
fn verify_transactions(
txn_list_with_proof: &TransactionListWithProof,
expected_txns: &[Transaction],
) -> Result<()> {
let txns = &txn_list_with_proof.transactions;
ensure!(
*txns == expected_txns,
"expected txns {:?} doesn't equal to returned txns {:?}",
expected_txns,
txns
);
Ok(())
}
fn verify_committed_txn_status(
txn_with_proof: Option<&TransactionWithProof>,
expected_txn: &Transaction,
) -> Result<()> {
let txn = &txn_with_proof
.ok_or_else(|| format_err!("Transaction is not committed."))?
.transaction;
ensure!(
expected_txn == txn,
"The two transactions do not match. Expected txn: {:?}, returned txn: {:?}",
expected_txn,
txn,
);
Ok(())
}
fn verify_uncommitted_txn_status(
txn_with_proof: Option<&TransactionWithProof>,
proof_of_current_sequence_number: Option<&AccountStateWithProof>,
expected_seq_num: u64,
) -> Result<()> {
ensure!(
txn_with_proof.is_none(),
"Transaction is unexpectedly committed."
);
let proof_of_current_sequence_number = proof_of_current_sequence_number.ok_or_else(|| {
format_err!(
"proof_of_current_sequence_number should be provided when transaction is not committed."
)
})?;
let seq_num_in_account =
get_account_resource_or_default(&proof_of_current_sequence_number.blob)?.sequence_number();
ensure!(
expected_seq_num == seq_num_in_account,
"expected_seq_num {} doesn't match that in account state \
         in TransactionStatus::Uncommitted {}",
expected_seq_num,
seq_num_in_account,
);
Ok(())
}
| 32.934404 | 99 | 0.645679 |
233d5c969448f61cbc53514263edf63d7bc580c6 | 12,007 | use std::io;
use std::mem;
use winapi::shared::minwindef::WORD;
use winapi::um::consoleapi::{GetConsoleMode, SetConsoleMode};
use winapi::um::wincon::{
self, GetConsoleScreenBufferInfo, SetConsoleTextAttribute,
CONSOLE_SCREEN_BUFFER_INFO, FOREGROUND_BLUE as FG_BLUE,
FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY,
FOREGROUND_RED as FG_RED,
};
use crate::{AsHandleRef, HandleRef};
const FG_CYAN: WORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: WORD = FG_BLUE | FG_RED;
const FG_YELLOW: WORD = FG_GREEN | FG_RED;
const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED;
/// Query the given handle for information about the console's screen buffer.
///
/// The given handle should represent a console. Otherwise, an error is
/// returned.
///
/// This corresponds to calling [`GetConsoleScreenBufferInfo`].
///
/// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
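///
/// # Example
///
/// A minimal sketch (this assumes stdout is attached to a real console):
///
/// ```no_run
/// # #[cfg(windows)]
/// # {
/// use winapi_util::{console, HandleRef};
///
/// let info = console::screen_buffer_info(&HandleRef::stdout()).unwrap();
/// let (cols, rows) = info.size();
/// println!("buffer is {} columns by {} rows", cols, rows);
/// # }
/// ```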
pub fn screen_buffer_info<H: AsHandleRef>(
h: H,
) -> io::Result<ScreenBufferInfo> {
unsafe {
let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
let rc = GetConsoleScreenBufferInfo(h.as_raw(), &mut info);
if rc == 0 {
return Err(io::Error::last_os_error());
}
Ok(ScreenBufferInfo(info))
}
}
/// Set the text attributes of the console represented by the given handle.
///
/// This corresponds to calling [`SetConsoleTextAttribute`].
///
/// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
pub fn set_text_attributes<H: AsHandleRef>(
h: H,
attributes: u16,
) -> io::Result<()> {
if unsafe { SetConsoleTextAttribute(h.as_raw(), attributes) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Query the mode of the console represented by the given handle.
///
/// This corresponds to calling [`GetConsoleMode`], which describes the return
/// value.
///
/// [`GetConsoleMode`]: https://docs.microsoft.com/en-us/windows/console/getconsolemode
pub fn mode<H: AsHandleRef>(h: H) -> io::Result<u32> {
let mut mode = 0;
if unsafe { GetConsoleMode(h.as_raw(), &mut mode) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(mode)
}
}
/// Set the mode of the console represented by the given handle.
///
/// This corresponds to calling [`SetConsoleMode`], which describes the format
/// of the mode parameter.
///
/// [`SetConsoleMode`]: https://docs.microsoft.com/en-us/windows/console/setconsolemode
pub fn set_mode<H: AsHandleRef>(h: H, mode: u32) -> io::Result<()> {
if unsafe { SetConsoleMode(h.as_raw(), mode) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Represents console screen buffer information such as size, cursor position
/// and styling attributes.
///
/// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`].
///
/// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
#[derive(Clone)]
pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO);
impl ScreenBufferInfo {
/// Returns the size of the console screen buffer, in character columns and
/// rows.
///
/// This corresponds to `dwSize`.
pub fn size(&self) -> (i16, i16) {
(self.0.dwSize.X, self.0.dwSize.Y)
}
/// Returns the position of the cursor in terms of column and row
/// coordinates of the console screen buffer.
///
/// This corresponds to `dwCursorPosition`.
pub fn cursor_position(&self) -> (i16, i16) {
(self.0.dwCursorPosition.X, self.0.dwCursorPosition.Y)
}
/// Returns the character attributes associated with this console.
///
/// This corresponds to `wAttributes`.
///
/// See [`char info`] for more details.
///
/// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str
pub fn attributes(&self) -> u16 {
self.0.wAttributes
}
/// Returns the maximum size of the console window, in character columns
/// and rows, given the current screen buffer size and font and the screen
/// size.
pub fn max_window_size(&self) -> (i16, i16) {
(self.0.dwMaximumWindowSize.X, self.0.dwMaximumWindowSize.Y)
}
/// Returns the console screen buffer coordinates of the upper-left and
/// lower-right corners of the display window.
///
/// This corresponds to `srWindow`.
pub fn window_rect(&self) -> SmallRect {
SmallRect {
left: self.0.srWindow.Left,
top: self.0.srWindow.Top,
right: self.0.srWindow.Right,
bottom: self.0.srWindow.Bottom,
}
}
}
/// Defines the coordinates of the upper left and lower right corners of a rectangle.
///
/// This corresponds to [`SMALL_RECT`].
///
/// [`SMALL_RECT`]: https://docs.microsoft.com/en-us/windows/console/small-rect-str
pub struct SmallRect {
pub left: i16,
pub top: i16,
pub right: i16,
pub bottom: i16,
}
/// A Windows console.
///
/// This represents a very limited set of functionality available to a Windows
/// console. In particular, it can only change text attributes such as color
/// and intensity. This may grow over time. If you need more routines, please
/// file an issue and/or PR.
///
/// There is no way to "write" to this console. Simply write to
/// stdout or stderr instead, while interleaving instructions to the console
/// to change text attributes.
///
/// A common pitfall when using a console is to forget to flush writes to
/// stdout before setting new text attributes.
///
/// # Example
/// ```no_run
/// # #[cfg(windows)]
/// # {
/// use winapi_util::console::{Console, Color, Intense};
///
/// let mut con = Console::stdout().unwrap();
/// con.fg(Intense::Yes, Color::Cyan).unwrap();
/// println!("This text will be intense cyan.");
/// con.reset().unwrap();
/// println!("This text will be normal.");
/// # }
/// ```
#[derive(Debug)]
pub struct Console {
kind: HandleKind,
start_attr: TextAttributes,
cur_attr: TextAttributes,
}
#[derive(Clone, Copy, Debug)]
enum HandleKind {
Stdout,
Stderr,
}
impl HandleKind {
fn handle(&self) -> HandleRef {
match *self {
HandleKind::Stdout => HandleRef::stdout(),
HandleKind::Stderr => HandleRef::stderr(),
}
}
}
impl Console {
/// Get a console for a standard I/O stream.
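    ///
    /// The console's text attributes at creation time are captured so that
    /// `reset` can restore them later.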
fn create_for_stream(kind: HandleKind) -> io::Result<Console> {
let h = kind.handle();
let info = screen_buffer_info(&h)?;
let attr = TextAttributes::from_word(info.attributes());
Ok(Console { kind: kind, start_attr: attr, cur_attr: attr })
}
/// Create a new Console to stdout.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stdout() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stdout)
}
/// Create a new Console to stderr.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stderr() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stderr)
}
/// Applies the current text attributes.
fn set(&mut self) -> io::Result<()> {
set_text_attributes(self.kind.handle(), self.cur_attr.to_word())
}
/// Apply the given intensity and color attributes to the console
/// foreground.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.fg_color = color;
self.cur_attr.fg_intense = intense;
self.set()
}
/// Apply the given intensity and color attributes to the console
/// background.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.bg_color = color;
self.cur_attr.bg_intense = intense;
self.set()
}
/// Reset the console text attributes to their original settings.
///
/// The original settings correspond to the text attributes on the console
/// when this `Console` value was created.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn reset(&mut self) -> io::Result<()> {
self.cur_attr = self.start_attr;
self.set()
}
/// Toggle virtual terminal processing.
///
/// This method attempts to toggle virtual terminal processing for this
/// console. If there was a problem toggling it, then an error returned.
/// On success, the caller may assume that toggling it was successful.
///
/// When virtual terminal processing is enabled, characters emitted to the
/// console are parsed for VT100 and similar control character sequences
/// that control color and other similar operations.
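    ///
    /// # Example
    ///
    /// A minimal sketch (this assumes a VT-capable Windows 10 console):
    ///
    /// ```no_run
    /// # #[cfg(windows)]
    /// # {
    /// use winapi_util::console::Console;
    ///
    /// let mut con = Console::stdout().unwrap();
    /// con.set_virtual_terminal_processing(true).unwrap();
    /// println!("\x1b[31mred via a VT100 escape sequence\x1b[0m");
    /// con.set_virtual_terminal_processing(false).unwrap();
    /// # }
    /// ```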
pub fn set_virtual_terminal_processing(
&mut self,
yes: bool,
) -> io::Result<()> {
let vt = wincon::ENABLE_VIRTUAL_TERMINAL_PROCESSING;
let handle = self.kind.handle();
let old_mode = mode(&handle)?;
let new_mode = if yes { old_mode | vt } else { old_mode & !vt };
if old_mode == new_mode {
return Ok(());
}
set_mode(&handle, new_mode)
}
}
/// A representation of text attributes for the Windows console.
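///
/// Windows packs the foreground color and intensity into the low four bits of
/// the attribute `WORD` and the background into the next four, which is why
/// the `to_bg`/`from_bg` helpers below shift by 4.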
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TextAttributes {
fg_color: Color,
fg_intense: Intense,
bg_color: Color,
bg_intense: Intense,
}
impl TextAttributes {
fn to_word(&self) -> WORD {
let mut w = 0;
w |= self.fg_color.to_fg();
w |= self.fg_intense.to_fg();
w |= self.bg_color.to_bg();
w |= self.bg_intense.to_bg();
w
}
fn from_word(word: WORD) -> TextAttributes {
TextAttributes {
fg_color: Color::from_fg(word),
fg_intense: Intense::from_fg(word),
bg_color: Color::from_bg(word),
bg_intense: Intense::from_bg(word),
}
}
}
/// Whether to use intense colors or not.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Intense {
Yes,
No,
}
impl Intense {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Intense {
Intense::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Intense::No => 0,
Intense::Yes => FG_INTENSITY,
}
}
fn from_fg(word: WORD) -> Intense {
if word & FG_INTENSITY > 0 {
Intense::Yes
} else {
Intense::No
}
}
}
/// The set of available colors for use with a Windows console.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Color {
Black,
Blue,
Green,
Red,
Cyan,
Magenta,
Yellow,
White,
}
impl Color {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Color {
Color::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Color::Black => 0,
Color::Blue => FG_BLUE,
Color::Green => FG_GREEN,
Color::Red => FG_RED,
Color::Cyan => FG_CYAN,
Color::Magenta => FG_MAGENTA,
Color::Yellow => FG_YELLOW,
Color::White => FG_WHITE,
}
}
fn from_fg(word: WORD) -> Color {
match word & 0b111 {
FG_BLUE => Color::Blue,
FG_GREEN => Color::Green,
FG_RED => Color::Red,
FG_CYAN => Color::Cyan,
FG_MAGENTA => Color::Magenta,
FG_YELLOW => Color::Yellow,
FG_WHITE => Color::White,
_ => Color::Black,
}
}
}
| 29.794045 | 115 | 0.618556 |
0194116b2aa6d42e3ee789e90dad5c32165acf15 | 11,741 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Service config.
///
///
/// Service configuration allows for customization of endpoints, region, credentials providers,
/// and retry configuration. Generally, it is constructed automatically for you from a shared
/// configuration loaded by the `aws-config` crate. For example:
///
/// ```ignore
/// // Load a shared config from the environment
/// let shared_config = aws_config::from_env().load().await;
/// // The client constructor automatically converts the shared config into the service config
/// let client = Client::new(&shared_config);
/// ```
///
/// The service config can also be constructed manually using its builder.
///
pub struct Config {
app_name: Option<aws_types::app_name::AppName>,
pub(crate) timeout_config: Option<aws_smithy_types::timeout::Config>,
pub(crate) sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
pub(crate) retry_config: Option<aws_smithy_types::retry::RetryConfig>,
pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
/// Constructs a config builder.
pub fn builder() -> Builder {
Builder::default()
}
/// Returns the name of the app that is using the client, if it was provided.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn app_name(&self) -> Option<&aws_types::app_name::AppName> {
self.app_name.as_ref()
}
/// Creates a new [service config](crate::Config) from a [shared `config`](aws_types::sdk_config::SdkConfig).
pub fn new(config: &aws_types::sdk_config::SdkConfig) -> Self {
Builder::from(config).build()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overridden by the `Endpoint`, or by specifying a custom
/// [`SigningService`](aws_types::SigningService) during operation construction
pub fn signing_service(&self) -> &'static str {
"mgn"
}
}
/// Builder for creating a `Config`.
#[derive(Default)]
pub struct Builder {
app_name: Option<aws_types::app_name::AppName>,
timeout_config: Option<aws_smithy_types::timeout::Config>,
sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
retry_config: Option<aws_smithy_types::retry::RetryConfig>,
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
/// Constructs a config builder.
pub fn new() -> Self {
Self::default()
}
/// Sets the name of the app that is using the client.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn app_name(mut self, app_name: aws_types::app_name::AppName) -> Self {
self.set_app_name(Some(app_name));
self
}
/// Sets the name of the app that is using the client.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn set_app_name(&mut self, app_name: Option<aws_types::app_name::AppName>) -> &mut Self {
self.app_name = app_name;
self
}
/// Set the timeout_config for the builder
///
/// # Examples
///
/// ```no_run
/// # use std::time::Duration;
/// use aws_sdk_mgn::config::Config;
/// use aws_smithy_types::{timeout, tristate::TriState};
///
/// let api_timeouts = timeout::Api::new()
/// .with_call_attempt_timeout(TriState::Set(Duration::from_secs(1)));
/// let timeout_config = timeout::Config::new()
/// .with_api_timeouts(api_timeouts);
/// let config = Config::builder().timeout_config(timeout_config).build();
/// ```
pub fn timeout_config(mut self, timeout_config: aws_smithy_types::timeout::Config) -> Self {
self.set_timeout_config(Some(timeout_config));
self
}
/// Set the timeout_config for the builder
///
/// # Examples
///
/// ```no_run
/// # use std::time::Duration;
/// use aws_sdk_mgn::config::{Builder, Config};
/// use aws_smithy_types::{timeout, tristate::TriState};
///
/// fn set_request_timeout(builder: &mut Builder) {
/// let api_timeouts = timeout::Api::new()
/// .with_call_attempt_timeout(TriState::Set(Duration::from_secs(1)));
/// let timeout_config = timeout::Config::new()
/// .with_api_timeouts(api_timeouts);
/// builder.set_timeout_config(Some(timeout_config));
/// }
///
/// let mut builder = Config::builder();
/// set_request_timeout(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_timeout_config(
&mut self,
timeout_config: Option<aws_smithy_types::timeout::Config>,
) -> &mut Self {
self.timeout_config = timeout_config;
self
}
/// Set the sleep_impl for the builder
///
/// # Examples
///
/// ```no_run
/// use aws_sdk_mgn::config::Config;
/// use aws_smithy_async::rt::sleep::AsyncSleep;
/// use aws_smithy_async::rt::sleep::Sleep;
///
/// #[derive(Debug)]
/// pub struct ForeverSleep;
///
/// impl AsyncSleep for ForeverSleep {
/// fn sleep(&self, duration: std::time::Duration) -> Sleep {
/// Sleep::new(std::future::pending())
/// }
/// }
///
/// let sleep_impl = std::sync::Arc::new(ForeverSleep);
/// let config = Config::builder().sleep_impl(sleep_impl).build();
/// ```
pub fn sleep_impl(
mut self,
sleep_impl: std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>,
) -> Self {
self.set_sleep_impl(Some(sleep_impl));
self
}
/// Set the sleep_impl for the builder
///
/// # Examples
///
/// ```no_run
/// use aws_sdk_mgn::config::{Builder, Config};
/// use aws_smithy_async::rt::sleep::AsyncSleep;
/// use aws_smithy_async::rt::sleep::Sleep;
///
/// #[derive(Debug)]
/// pub struct ForeverSleep;
///
/// impl AsyncSleep for ForeverSleep {
/// fn sleep(&self, duration: std::time::Duration) -> Sleep {
/// Sleep::new(std::future::pending())
/// }
/// }
///
/// fn set_never_ending_sleep_impl(builder: &mut Builder) {
/// let sleep_impl = std::sync::Arc::new(ForeverSleep);
/// builder.set_sleep_impl(Some(sleep_impl));
/// }
///
/// let mut builder = Config::builder();
/// set_never_ending_sleep_impl(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_sleep_impl(
&mut self,
sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
) -> &mut Self {
self.sleep_impl = sleep_impl;
self
}
/// Set the retry_config for the builder
///
/// # Examples
/// ```no_run
/// use aws_sdk_mgn::config::Config;
/// use aws_smithy_types::retry::RetryConfig;
///
/// let retry_config = RetryConfig::new().with_max_attempts(5);
/// let config = Config::builder().retry_config(retry_config).build();
/// ```
pub fn retry_config(mut self, retry_config: aws_smithy_types::retry::RetryConfig) -> Self {
self.set_retry_config(Some(retry_config));
self
}
/// Set the retry_config for the builder
///
/// # Examples
/// ```no_run
/// use aws_sdk_mgn::config::{Builder, Config};
/// use aws_smithy_types::retry::RetryConfig;
///
/// fn disable_retries(builder: &mut Builder) {
/// let retry_config = RetryConfig::new().with_max_attempts(1);
/// builder.set_retry_config(Some(retry_config));
/// }
///
/// let mut builder = Config::builder();
/// disable_retries(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_retry_config(
&mut self,
retry_config: Option<aws_smithy_types::retry::RetryConfig>,
) -> &mut Self {
self.retry_config = retry_config;
self
}
/// Sets the endpoint resolver to use when making requests.
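    ///
    /// # Examples
    ///
    /// A minimal sketch pointing the client at a static endpoint (this assumes
    /// the `http` crate is available for building the `Uri`):
    ///
    /// ```no_run
    /// use aws_sdk_mgn::config::Config;
    /// use aws_smithy_http::endpoint::Endpoint;
    ///
    /// let config = Config::builder()
    ///     .endpoint_resolver(Endpoint::immutable(http::Uri::from_static("http://localhost:8080")))
    ///     .build();
    /// ```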
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
/// Sets the AWS region to use when making requests.
///
/// # Examples
/// ```no_run
/// use aws_types::region::Region;
/// use aws_sdk_mgn::config::{Builder, Config};
///
/// let config = aws_sdk_mgn::Config::builder()
/// .region(Region::new("us-east-1"))
/// .build();
/// ```
pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
self.region = region.into();
self
}
/// Sets the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
) -> Self {
self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new(
credentials_provider,
));
self
}
/// Sets the credentials provider for this service
pub fn set_credentials_provider(
&mut self,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
) -> &mut Self {
self.credentials_provider = credentials_provider;
self
}
/// Builds a [`Config`].
pub fn build(self) -> Config {
Config {
app_name: self.app_name,
timeout_config: self.timeout_config,
sleep_impl: self.sleep_impl,
retry_config: self.retry_config,
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: self.region,
credentials_provider: self.credentials_provider.unwrap_or_else(|| {
aws_types::credentials::SharedCredentialsProvider::new(
crate::no_credentials::NoCredentials,
)
}),
}
}
}
impl From<&aws_types::sdk_config::SdkConfig> for Builder {
fn from(input: &aws_types::sdk_config::SdkConfig) -> Self {
let mut builder = Builder::default();
builder = builder.region(input.region().cloned());
builder.set_retry_config(input.retry_config().cloned());
builder.set_timeout_config(input.timeout_config().cloned());
builder.set_sleep_impl(input.sleep_impl().clone());
builder.set_credentials_provider(input.credentials_provider().cloned());
builder.set_app_name(input.app_name().cloned());
builder
}
}
impl From<&aws_types::sdk_config::SdkConfig> for Config {
fn from(sdk_config: &aws_types::sdk_config::SdkConfig) -> Self {
Builder::from(sdk_config).build()
}
}
| 36.690625 | 113 | 0.623116 |
dec3b64aded4c23b5fbcf1266ef8a11b4ed852f9 | 2,540 | use crate::aliases::{Archive, BridgeConfig, GrpcClient};
use crate::server::make_server;
use anyhow::Result;
use daml_grpc::DamlGrpcClientBuilder;
use daml_lf::element::DamlArchive;
use daml_util::package::{ArchiveAutoNamingStyle, DamlPackages};
use std::sync::Arc;
use tokio::sync::RwLock;
use tokio::time::Duration;
use tracing::{error, info};
/// Daml JSON<>GRPC API bridge.
pub struct Bridge {
config: BridgeConfig,
}
impl Bridge {
/// Create a new [`Bridge`].
pub const fn new(config: BridgeConfig) -> Self {
Self {
config,
}
}
/// Start the bridge.
pub async fn run(&self) -> Result<()> {
let grpc_client = Arc::new(
DamlGrpcClientBuilder::uri(self.config.ledger_uri())
.connect_timeout(Some(self.config.ledger_connect_timeout()))
.timeout(self.config.ledger_timeout())
.with_auth(self.config.ledger_token().to_owned())
.connect()
.await?,
);
let archive: Archive = Arc::new(RwLock::new(fetch_archive(&grpc_client).await?));
let http_server = make_server(self.config.clone(), archive.clone(), grpc_client.clone())?;
let package_refresher = refresh(archive.clone(), grpc_client.clone(), self.config.package_reload_interval());
info!("Ready");
let http_handle = tokio::spawn(http_server);
let refresher_handle = tokio::spawn(package_refresher);
let _result = tokio::join!(http_handle, refresher_handle);
Ok(())
}
}
/// Refresh the [`Archive`] from the ledger server.
async fn refresh(archive: Archive, grpc_client: GrpcClient, interval: Duration) {
let mut timer = tokio::time::interval(interval);
let _ = timer.tick().await;
loop {
let now = timer.tick().await;
info!("refreshing Dar (Time now = {:?})", now);
let new_archive = fetch_archive(&grpc_client).await;
match new_archive {
Ok(new_arch) => *archive.write().await = new_arch,
Err(e) => {
error!("error refreshing Dar from ledger: {}", e);
},
}
}
}
async fn fetch_archive(grpc_client: &GrpcClient) -> Result<DamlArchive<'static>> {
let all_packages = DamlPackages::from_ledger(grpc_client).await?;
tokio::task::spawn_blocking(move || create_archive(all_packages)).await?
}
fn create_archive(packages: DamlPackages) -> Result<DamlArchive<'static>> {
Ok(packages.into_dar(ArchiveAutoNamingStyle::Uuid)?.to_owned_archive()?)
}
| 35.774648 | 117 | 0.635827 |
4b0c3b8204309f5c5c2095af8f156ba54eec3316 | 35,179 | // Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![recursion_limit="128"]
#![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
extern crate clap;
extern crate env_logger;
extern crate hab;
extern crate habitat_core as hcore;
extern crate habitat_common as common;
extern crate handlebars;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate base64;
use std::env;
use std::ffi::OsString;
use std::io::{self, Read};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::thread;
use clap::{ArgMatches, Shell};
use common::command::package::install::{InstallMode, InstallSource};
use common::ui::UI;
use hcore::channel;
use hcore::crypto::{init, default_cache_key_path, SigKeyPair};
use hcore::crypto::keys::PairType;
use hcore::env as henv;
use hcore::fs::{cache_artifact_path, cache_analytics_path, cache_key_path};
use hcore::package::PackageIdent;
use hcore::service::ServiceGroup;
use hcore::url::default_bldr_url;
use hcore::binlink::default_binlink_dir;
use hab::{AUTH_TOKEN_ENVVAR, ORIGIN_ENVVAR, PRODUCT, VERSION};
use hab::analytics;
use hab::cli;
use hab::command;
use hab::config;
use hab::feat;
use hab::scaffolding;
use hab::error::{Error, Result};
/// Makes the --org CLI param optional when this env var is set
const HABITAT_ORG_ENVVAR: &'static str = "HAB_ORG";
lazy_static! {
    /// The default filesystem root path to base all commands from. This is lazily generated on
    /// first call and reflects the presence and value of the environment variable keyed as
    /// `FS_ROOT_ENVVAR`.
static ref FS_ROOT: PathBuf = {
use hcore::fs::FS_ROOT_ENVVAR;
if let Some(root) = henv::var(FS_ROOT_ENVVAR).ok() {
PathBuf::from(root)
} else {
PathBuf::from("/")
}
};
}
fn main() {
env_logger::init();
let mut ui = UI::default_with_env();
enable_features_from_env(&mut ui);
thread::spawn(|| analytics::instrument_subcommand());
if let Err(e) = start(&mut ui) {
ui.fatal(e).unwrap();
std::process::exit(1)
}
}
fn start(ui: &mut UI) -> Result<()> {
exec_subcommand_if_called(ui)?;
let (args, remaining_args) = raw_parse_args();
debug!("clap cli args: {:?}", &args);
debug!("remaining cli args: {:?}", &remaining_args);
// We build the command tree in a separate thread to eliminate
// possible stack overflow crashes at runtime. OSX, for instance,
// will crash with our large tree. This is a known issue:
// https://github.com/kbknapp/clap-rs/issues/86
let child = thread::Builder::new()
.stack_size(8 * 1024 * 1024)
.spawn(move || {
return cli::get()
.get_matches_from_safe_borrow(&mut args.iter())
.unwrap_or_else(|e| {
analytics::instrument_clap_error(&e);
e.exit();
});
})
.unwrap();
let app_matches = child.join().unwrap();
match app_matches.subcommand() {
("cli", Some(matches)) => {
match matches.subcommand() {
("setup", Some(_)) => sub_cli_setup(ui)?,
("completers", Some(m)) => sub_cli_completers(m)?,
_ => unreachable!(),
}
}
("install", Some(m)) => sub_pkg_install(ui, m)?,
("origin", Some(matches)) => {
match matches.subcommand() {
("key", Some(m)) => {
match m.subcommand() {
("download", Some(sc)) => sub_origin_key_download(ui, sc)?,
("export", Some(sc)) => sub_origin_key_export(sc)?,
("generate", Some(sc)) => sub_origin_key_generate(ui, sc)?,
("import", Some(_)) => sub_origin_key_import(ui)?,
("upload", Some(sc)) => sub_origin_key_upload(ui, sc)?,
_ => unreachable!(),
}
}
("secret", Some(m)) => {
match m.subcommand() {
("upload", Some(sc)) => sub_origin_secret_upload(ui, sc)?,
("delete", Some(sc)) => sub_origin_secret_delete(ui, sc)?,
("list", Some(sc)) => sub_origin_secret_list(ui, sc)?,
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
("bldr", Some(matches)) => {
match matches.subcommand() {
("job", Some(m)) => {
match m.subcommand() {
("start", Some(m)) => sub_bldr_job_start(ui, m)?,
("cancel", Some(m)) => sub_bldr_job_cancel(ui, m)?,
("promote", Some(m)) => sub_bldr_job_promote_or_demote(ui, m, true)?,
("demote", Some(m)) => sub_bldr_job_promote_or_demote(ui, m, false)?,
("status", Some(m)) => sub_bldr_job_status(ui, m)?,
_ => unreachable!(),
}
}
("channel", Some(m)) => {
match m.subcommand() {
("create", Some(m)) => sub_bldr_channel_create(ui, m)?,
("destroy", Some(m)) => sub_bldr_channel_destroy(ui, m)?,
("list", Some(m)) => sub_bldr_channel_list(ui, m)?,
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
("pkg", Some(matches)) => {
match matches.subcommand() {
("binds", Some(m)) => sub_pkg_binds(m)?,
("binlink", Some(m)) => sub_pkg_binlink(ui, m)?,
("build", Some(m)) => sub_pkg_build(ui, m)?,
("channels", Some(m)) => sub_pkg_channels(ui, m)?,
("config", Some(m)) => sub_pkg_config(m)?,
("env", Some(m)) => sub_pkg_env(m)?,
("exec", Some(m)) => sub_pkg_exec(m, remaining_args)?,
("export", Some(m)) => sub_pkg_export(ui, m)?,
("hash", Some(m)) => sub_pkg_hash(m)?,
("install", Some(m)) => sub_pkg_install(ui, m)?,
("path", Some(m)) => sub_pkg_path(m)?,
("provides", Some(m)) => sub_pkg_provides(m)?,
("search", Some(m)) => sub_pkg_search(ui, m)?,
("sign", Some(m)) => sub_pkg_sign(ui, m)?,
("upload", Some(m)) => sub_pkg_upload(ui, m)?,
("verify", Some(m)) => sub_pkg_verify(ui, m)?,
("header", Some(m)) => sub_pkg_header(ui, m)?,
("info", Some(m)) => sub_pkg_info(ui, m)?,
("promote", Some(m)) => sub_pkg_promote(ui, m)?,
("demote", Some(m)) => sub_pkg_demote(ui, m)?,
_ => unreachable!(),
}
}
("plan", Some(matches)) => {
match matches.subcommand() {
("init", Some(m)) => sub_plan_init(ui, m)?,
_ => unreachable!(),
}
}
("ring", Some(matches)) => {
match matches.subcommand() {
("key", Some(m)) => {
match m.subcommand() {
("export", Some(sc)) => sub_ring_key_export(sc)?,
("import", Some(_)) => sub_ring_key_import(ui)?,
("generate", Some(sc)) => sub_ring_key_generate(ui, sc)?,
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
("svc", Some(matches)) => {
match matches.subcommand() {
("key", Some(m)) => {
match m.subcommand() {
("generate", Some(sc)) => sub_service_key_generate(ui, sc)?,
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
("setup", Some(_)) => sub_cli_setup(ui)?,
("user", Some(matches)) => {
match matches.subcommand() {
("key", Some(m)) => {
match m.subcommand() {
("generate", Some(sc)) => sub_user_key_generate(ui, sc)?,
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
_ => unreachable!(),
};
Ok(())
}
fn sub_cli_setup(ui: &mut UI) -> Result<()> {
init();
command::cli::setup::start(
ui,
&default_cache_key_path(Some(&*FS_ROOT)),
&cache_analytics_path(Some(&*FS_ROOT)),
)
}
fn sub_cli_completers(m: &ArgMatches) -> Result<()> {
let shell = m.value_of("SHELL").expect(
"Missing Shell; A shell is required",
);
cli::get().gen_completions_to("hab", shell.parse::<Shell>().unwrap(), &mut io::stdout());
Ok(())
}
fn sub_origin_key_download(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let origin = m.value_of("ORIGIN").unwrap(); // Required via clap
let revision = m.value_of("REVISION");
let with_secret = m.is_present("WITH_SECRET");
let with_encryption = m.is_present("WITH_ENCRYPTION");
let token = maybe_auth_token(ui, &m);
let url = bldr_url_from_matches(m);
command::origin::key::download::start(
ui,
&url,
&origin,
revision,
with_secret,
with_encryption,
token.as_ref().map(String::as_str),
&default_cache_key_path(Some(&*FS_ROOT)),
)
}
fn sub_origin_key_export(m: &ArgMatches) -> Result<()> {
let origin = m.value_of("ORIGIN").unwrap(); // Required via clap
let pair_type = PairType::from_str(m.value_of("PAIR_TYPE").unwrap_or("public"))?;
init();
command::origin::key::export::start(origin, pair_type, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_origin_key_generate(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let origin = origin_param_or_env(&m)?;
init();
command::origin::key::generate::start(ui, &origin, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_origin_key_import(ui: &mut UI) -> Result<()> {
let mut content = String::new();
io::stdin().read_to_string(&mut content)?;
init();
    // Trim the content to lose line feeds added by the PowerShell pipeline
command::origin::key::import::start(
ui,
content.trim(),
&default_cache_key_path(Some(&*FS_ROOT)),
)
}
fn sub_origin_key_upload(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let token = auth_token_param_or_env(ui, &m)?;
init();
if m.is_present("ORIGIN") {
let origin = m.value_of("ORIGIN").unwrap(); // Required via clap
// you can either specify files, or infer the latest key names
let with_secret = m.is_present("WITH_SECRET");
command::origin::key::upload_latest::start(
ui,
&url,
&token,
origin,
with_secret,
&default_cache_key_path(Some(&*FS_ROOT)),
)
} else {
let keyfile = Path::new(m.value_of("PUBLIC_FILE").unwrap());
let secret_keyfile = m.value_of("SECRET_FILE").map(|f| Path::new(f));
command::origin::key::upload::start(ui, &url, &token, &keyfile, secret_keyfile)
}
}
fn sub_origin_secret_upload(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let token = auth_token_param_or_env(ui, &m)?;
let origin = origin_param_or_env(&m)?;
let key = m.value_of("KEY_NAME").unwrap();
let secret = m.value_of("SECRET").unwrap();
command::origin::secret::upload::start(
ui,
&url,
&token,
&origin,
&key,
&secret,
&default_cache_key_path(Some(&*FS_ROOT)),
)
}
fn sub_origin_secret_delete(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let token = auth_token_param_or_env(ui, &m)?;
let origin = origin_param_or_env(&m)?;
let key = m.value_of("KEY_NAME").unwrap();
command::origin::secret::delete::start(ui, &url, &token, &origin, &key)
}
fn sub_origin_secret_list(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let token = auth_token_param_or_env(ui, &m)?;
let origin = origin_param_or_env(&m)?;
command::origin::secret::list::start(ui, &url, &token, &origin)
}
fn sub_pkg_binlink(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
let dest_dir = binlink_dest_dir_from_matches(m);
let force = m.is_present("FORCE");
match m.value_of("BINARY") {
Some(binary) => {
command::pkg::binlink::start(ui, &ident, &binary, &dest_dir, &*FS_ROOT, force)
}
None => command::pkg::binlink::binlink_all_in_pkg(ui, &ident, &dest_dir, &*FS_ROOT, force),
}
}
fn sub_pkg_build(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let plan_context = m.value_of("PLAN_CONTEXT").unwrap(); // Required via clap
let root = m.value_of("HAB_STUDIO_ROOT");
let src = m.value_of("SRC_PATH");
let keys_string = match m.values_of("HAB_ORIGIN_KEYS") {
Some(keys) => {
init();
for key in keys.clone() {
// Validate that all secret keys are present
let pair = SigKeyPair::get_latest_pair_for(
key,
&default_cache_key_path(Some(&*FS_ROOT)),
None,
)?;
let _ = pair.secret();
}
Some(keys.collect::<Vec<_>>().join(","))
}
None => None,
};
let keys: Option<&str> = match keys_string.as_ref() {
Some(s) => Some(s),
None => None,
};
let docker = m.is_present("DOCKER");
let reuse = m.is_present("REUSE");
let windows = m.is_present("WINDOWS");
command::pkg::build::start(ui, plan_context, root, src, keys, reuse, windows, docker)
}
fn sub_pkg_config(m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
common::command::package::config::start(&ident, &*FS_ROOT)?;
Ok(())
}
fn sub_pkg_binds(m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
common::command::package::binds::start(&ident, &*FS_ROOT)?;
Ok(())
}
fn sub_pkg_env(m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
command::pkg::env::start(&ident, &*FS_ROOT)
}
fn sub_pkg_exec(m: &ArgMatches, cmd_args: Vec<OsString>) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?; // Required via clap
let cmd = m.value_of("CMD").unwrap(); // Required via clap
command::pkg::exec::start(&ident, cmd, cmd_args)
}
fn sub_pkg_export(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
let format = &m.value_of("FORMAT").unwrap();
let url = bldr_url_from_matches(m);
    let channel = m.value_of("CHANNEL")
        .map(|c| c.to_string())
        .unwrap_or_else(|| channel::default());
let export_fmt = command::pkg::export::format_for(ui, &format)?;
command::pkg::export::start(ui, &url, &channel, &ident, &export_fmt)
}
fn sub_pkg_hash(m: &ArgMatches) -> Result<()> {
init();
match m.value_of("SOURCE") {
Some(source) => {
// hash single file
command::pkg::hash::start(&source)
}
None => {
// read files from stdin
let stdin = io::stdin();
for line in stdin.lock().lines() {
let file = line?;
                command::pkg::hash::start(file.trim_end())?;
}
Ok(())
}
}
}
fn sub_bldr_channel_create(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let origin = origin_param_or_env(&m)?;
let channel = m.value_of("CHANNEL").unwrap(); // Required via clap
let token = auth_token_param_or_env(ui, &m)?;
command::bldr::channel::create::start(ui, &url, &token, &origin, &channel)
}
fn sub_bldr_channel_destroy(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let origin = origin_param_or_env(&m)?;
let channel = m.value_of("CHANNEL").unwrap(); // Required via clap
let token = auth_token_param_or_env(ui, &m)?;
command::bldr::channel::destroy::start(ui, &url, &token, &origin, &channel)
}
fn sub_bldr_channel_list(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let origin = origin_param_or_env(&m)?;
command::bldr::channel::list::start(ui, &url, &origin)
}
fn sub_bldr_job_start(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?; // Required via clap
let url = bldr_url_from_matches(m);
let group = m.is_present("GROUP");
let token = auth_token_param_or_env(ui, &m)?;
command::bldr::job::start::start(ui, &url, &ident, &token, group)
}
fn sub_bldr_job_cancel(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let group_id = m.value_of("GROUP_ID").unwrap(); // Required via clap
let token = auth_token_param_or_env(ui, &m)?;
command::bldr::job::cancel::start(ui, &url, &group_id, &token)
}
fn sub_bldr_job_promote_or_demote(ui: &mut UI, m: &ArgMatches, promote: bool) -> Result<()> {
let url = bldr_url_from_matches(m);
let group_id = m.value_of("GROUP_ID").unwrap(); // Required via clap
let channel = m.value_of("CHANNEL").unwrap(); // Required via clap
let origin = m.value_of("ORIGIN");
let interactive = m.is_present("INTERACTIVE");
let verbose = m.is_present("VERBOSE");
let token = auth_token_param_or_env(ui, &m)?;
command::bldr::job::promote::start(
ui,
&url,
&group_id,
&channel,
origin,
interactive,
verbose,
&token,
promote,
)
}
fn sub_bldr_job_status(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let group_id = m.value_of("GROUP_ID");
let origin = m.value_of("ORIGIN");
let limit = m.value_of("LIMIT")
.unwrap_or("10")
.parse::<usize>()
.unwrap();
let show_jobs = m.is_present("SHOW_JOBS");
command::bldr::job::status::start(ui, &url, group_id, origin, limit, show_jobs)
}
fn sub_plan_init(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let name = m.value_of("PKG_NAME").map(|v| v.into());
let origin = origin_param_or_env(&m)?;
let with_docs = m.is_present("WITH_DOCS");
let with_callbacks = m.is_present("WITH_CALLBACKS");
let with_all = m.is_present("WITH_ALL");
let windows = m.is_present("WINDOWS");
let scaffolding_ident = if windows {
match m.value_of("SCAFFOLDING") {
Some(scaffold) => Some(PackageIdent::from_str(scaffold)?),
None => None,
}
} else {
scaffolding::scaffold_check(ui, m.value_of("SCAFFOLDING"))?
};
command::plan::init::start(
ui,
origin,
with_docs,
with_callbacks,
with_all,
windows,
scaffolding_ident,
name,
)
}
fn sub_pkg_install(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let channel = channel_from_matches(m);
let install_sources = install_sources_from_matches(m)?;
let token = maybe_auth_token(ui, &m);
let install_mode = if feat::is_enabled(feat::OfflineInstall) && m.is_present("OFFLINE") {
InstallMode::Offline
} else {
InstallMode::default()
};
init();
for install_source in install_sources.iter() {
let pkg_install = common::command::package::install::start(
ui,
&url,
Some(&channel),
install_source,
PRODUCT,
VERSION,
&*FS_ROOT,
&cache_artifact_path(Some(&*FS_ROOT)),
token.as_ref().map(String::as_str),
&install_mode,
)?;
if m.is_present("BINLINK") {
let dest_dir = binlink_dest_dir_from_matches(m);
let force = m.is_present("FORCE");
command::pkg::binlink::binlink_all_in_pkg(
ui,
pkg_install.ident(),
dest_dir,
&*FS_ROOT,
force,
)?;
}
}
Ok(())
}
fn sub_pkg_path(m: &ArgMatches) -> Result<()> {
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?;
command::pkg::path::start(&ident, &*FS_ROOT)
}
fn sub_pkg_provides(m: &ArgMatches) -> Result<()> {
let filename = m.value_of("FILE").unwrap(); // Required via clap
let full_releases = m.is_present("FULL_RELEASES");
let full_paths = m.is_present("FULL_PATHS");
command::pkg::provides::start(&filename, &*FS_ROOT, full_releases, full_paths)
}
fn sub_pkg_search(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let search_term = m.value_of("SEARCH_TERM").unwrap(); // Required via clap
let token = maybe_auth_token(ui, &m);
command::pkg::search::start(&search_term, &url, token.as_ref().map(String::as_str))
}
fn sub_pkg_sign(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let src = Path::new(m.value_of("SOURCE").unwrap()); // Required via clap
let dst = Path::new(m.value_of("DEST").unwrap()); // Required via clap
init();
let pair = SigKeyPair::get_latest_pair_for(
&origin_param_or_env(&m)?,
&default_cache_key_path(Some(&*FS_ROOT)),
Some(&PairType::Secret),
)?;
command::pkg::sign::start(ui, &pair, &src, &dst)
}
fn sub_pkg_upload(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let key_path = cache_key_path(Some(&*FS_ROOT));
let url = bldr_url_from_matches(m);
// When packages are uploaded, they *always* go to `unstable`;
// they can optionally get added to another channel, too.
let additional_release_channel: Option<&str> = m.value_of("CHANNEL");
let token = auth_token_param_or_env(ui, &m)?;
let artifact_paths = m.values_of("HART_FILE").unwrap(); // Required via clap
for artifact_path in artifact_paths {
command::pkg::upload::start(
ui,
&url,
additional_release_channel,
&token,
&artifact_path,
&key_path,
)?;
}
Ok(())
}
fn sub_pkg_verify(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let src = Path::new(m.value_of("SOURCE").unwrap()); // Required via clap
init();
command::pkg::verify::start(ui, &src, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_pkg_header(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let src = Path::new(m.value_of("SOURCE").unwrap()); // Required via clap
init();
command::pkg::header::start(ui, &src)
}
fn sub_pkg_info(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let src = Path::new(m.value_of("SOURCE").unwrap()); // Required via clap
let to_json = m.is_present("TO_JSON");
init();
command::pkg::info::start(ui, &src, to_json)
}
fn sub_pkg_promote(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let channel = m.value_of("CHANNEL").unwrap();
let token = auth_token_param_or_env(ui, &m)?;
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?; // Required via clap
command::pkg::promote::start(ui, &url, &ident, &channel, &token)
}
fn sub_pkg_demote(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let channel = m.value_of("CHANNEL").unwrap();
let token = auth_token_param_or_env(ui, &m)?;
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?; // Required via clap
command::pkg::demote::start(ui, &url, &ident, &channel, &token)
}
fn sub_pkg_channels(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let url = bldr_url_from_matches(m);
let ident = PackageIdent::from_str(m.value_of("PKG_IDENT").unwrap())?; // Required via clap
let token = maybe_auth_token(ui, &m);
command::pkg::channels::start(ui, &url, &ident, token.as_ref().map(String::as_str))
}
fn sub_ring_key_export(m: &ArgMatches) -> Result<()> {
let ring = m.value_of("RING").unwrap(); // Required via clap
init();
command::ring::key::export::start(ring, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_ring_key_generate(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let ring = m.value_of("RING").unwrap(); // Required via clap
init();
command::ring::key::generate::start(ui, ring, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_ring_key_import(ui: &mut UI) -> Result<()> {
let mut content = String::new();
io::stdin().read_to_string(&mut content)?;
init();
    // Trim the content to lose line feeds added by the PowerShell pipeline
command::ring::key::import::start(ui, content.trim(), &default_cache_key_path(Some(&*FS_ROOT)))
}
fn sub_service_key_generate(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let org = org_param_or_env(&m)?;
let service_group = ServiceGroup::from_str(m.value_of("SERVICE_GROUP").unwrap())?;
init();
command::service::key::generate::start(
ui,
&org,
&service_group,
&default_cache_key_path(Some(&*FS_ROOT)),
)
}
fn sub_user_key_generate(ui: &mut UI, m: &ArgMatches) -> Result<()> {
let user = m.value_of("USER").unwrap(); // Required via clap
init();
command::user::key::generate::start(ui, user, &default_cache_key_path(Some(&*FS_ROOT)))
}
fn exec_subcommand_if_called(ui: &mut UI) -> Result<()> {
let mut args = env::args();
match (
args.nth(1).unwrap_or_default().as_str(),
args.next().unwrap_or_default().as_str(),
args.next().unwrap_or_default().as_str(),
) {
("butterfly", _, _) => command::butterfly::start(ui, env::args_os().skip(2).collect()),
("apply", _, _) => {
let mut args: Vec<OsString> = env::args_os().skip(1).collect();
args.insert(0, OsString::from("config"));
command::butterfly::start(ui, args)
}
("config", _, _) | ("file", _, _) => {
command::butterfly::start(ui, env::args_os().skip(1).collect())
}
("pkg", "export", "docker") => {
command::pkg::export::docker::start(ui, env::args_os().skip(4).collect())
}
("pkg", "export", "cf") => {
command::pkg::export::cf::start(ui, env::args_os().skip(4).collect())
}
("pkg", "export", "helm") => {
command::pkg::export::helm::start(ui, env::args_os().skip(4).collect())
}
("pkg", "export", "k8s") |
("pkg", "export", "kubernetes") => {
command::pkg::export::kubernetes::start(ui, env::args_os().skip(4).collect())
}
("pkg", "export", "tar") => {
command::pkg::export::tar::start(ui, env::args_os().skip(4).collect())
}
("run", _, _) => command::launcher::start(ui, env::args_os().skip(1).collect()),
("stu", _, _) | ("stud", _, _) | ("studi", _, _) | ("studio", _, _) => {
command::studio::enter::start(ui, env::args_os().skip(2).collect())
}
("sup", "run", _) |
("sup", "start", _) => command::launcher::start(ui, env::args_os().skip(2).collect()),
("sup", _, _) => command::sup::start(ui, env::args_os().skip(2).collect()),
("start", _, _) => command::launcher::start(ui, env::args_os().skip(1).collect()),
("stop", _, _) => command::sup::start(ui, env::args_os().skip(1).collect()),
("svc", "start", _) => command::launcher::start(ui, env::args_os().skip(2).collect()),
("svc", "load", _) |
("svc", "unload", _) |
("svc", "status", _) |
("svc", "stop", _) => command::sup::start(ui, env::args_os().skip(2).collect()),
("term", _, _) => command::sup::start(ui, env::args_os().skip(1).collect()),
_ => Ok(()),
}
}
/// Parse the raw program arguments and split off any arguments that will skip clap's parsing.
///
/// **Note** with the current version of clap there is no clean way to ignore arguments after a
/// certain point, especially if those arguments look like further options and flags.
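///
/// For example, `hab pkg exec core/busybox ls -la` hands the first five
/// arguments (`hab pkg exec core/busybox ls`) to clap and passes `-la`
/// through untouched.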
fn raw_parse_args() -> (Vec<OsString>, Vec<OsString>) {
let mut args = env::args();
match (
args.nth(1).unwrap_or_default().as_str(),
args.next().unwrap_or_default().as_str(),
) {
("pkg", "exec") => {
if args.by_ref().count() > 2 {
return (
env::args_os().take(5).collect(),
env::args_os().skip(5).collect(),
);
} else {
(env::args_os().collect(), Vec::new())
}
}
_ => (env::args_os().collect(), Vec::new()),
}
}
// Temporary warning function
fn warn_access_token(token: &str, ui: &mut UI) {
if !token.starts_with("_") {
ui.warn("WARNING:").unwrap();
ui.warn(
"Github tokens are being deprecated, please migrate to a Habitat Personal \
Access Token instead.",
).unwrap();
ui.warn(
"To generate a Habitat Personal Access token, please visit the Builder Profile \
page (https://bldr.habitat.sh/#/profile).",
).unwrap();
ui.warn(
"For more information, please read the documentation at \
https://www.habitat.sh/docs/using-builder/",
).unwrap();
}
}
/// Check to see if the user has passed in an AUTH_TOKEN param. If not, check the
/// HAB_AUTH_TOKEN env var. If not, check the CLI config to see if there is a default auth
/// token set. If that's empty too, then error.
fn auth_token_param_or_env(ui: &mut UI, m: &ArgMatches) -> Result<String> {
match m.value_of("AUTH_TOKEN") {
Some(o) => Ok(o.to_string()),
None => {
match henv::var(AUTH_TOKEN_ENVVAR) {
Ok(v) => {
// Temporary warning until we deprecate Github tokens completely
                    warn_access_token(&v, ui);
Ok(v)
}
Err(_) => {
let config = config::load()?;
match config.auth_token {
Some(v) => Ok(v),
None => return Err(Error::ArgumentError("No auth token specified")),
}
}
}
}
}
}
/// Check to see if an auth token exists and convert it to a string slice if it does. Unlike
/// auth_token_param_or_env, it's ok for no auth token to be present here. This is useful for
/// commands that can optionally take an auth token for operating on private packages.
fn maybe_auth_token(ui: &mut UI, m: &ArgMatches) -> Option<String> {
match auth_token_param_or_env(ui, &m) {
Ok(t) => {
// Temporary warning until we deprecate Github tokens completely
            warn_access_token(&t, ui);
Some(t)
}
Err(_) => None,
}
}
/// Check to see if the user has passed in an ORIGIN param. If not, check the HABITAT_ORIGIN env
/// var. If not, check the CLI config to see if there is a default origin set. If that's empty too,
/// then error.
fn origin_param_or_env(m: &ArgMatches) -> Result<String> {
match m.value_of("ORIGIN") {
Some(o) => Ok(o.to_string()),
None => {
match henv::var(ORIGIN_ENVVAR) {
Ok(v) => Ok(v),
Err(_) => {
let config = config::load()?;
match config.origin {
Some(v) => Ok(v),
None => return Err(Error::CryptoCLI("No origin specified".to_string())),
}
}
}
}
}
}
/// Check to see if the user has passed in an ORG param.
/// If not, check the HABITAT_ORG env var. If that's
/// empty too, then error.
fn org_param_or_env(m: &ArgMatches) -> Result<String> {
match m.value_of("ORG") {
Some(o) => Ok(o.to_string()),
None => {
match henv::var(HABITAT_ORG_ENVVAR) {
Ok(v) => Ok(v),
Err(_) => return Err(Error::CryptoCLI("No organization specified".to_string())),
}
}
}
}
/// Resolve a Builder URL. Taken from the environment or from CLI args,
/// if given.
fn bldr_url_from_matches(matches: &ArgMatches) -> String {
match matches.value_of("BLDR_URL") {
Some(url) => url.to_string(),
None => default_bldr_url(),
}
}
/// Resolve a channel. Taken from the environment or from CLI args, if
/// given.
fn channel_from_matches(matches: &ArgMatches) -> String {
    matches
        .value_of("CHANNEL")
        .map(|c| c.to_string())
        .unwrap_or_else(|| channel::default())
}
fn binlink_dest_dir_from_matches(matches: &ArgMatches) -> PathBuf {
let env_or_default = default_binlink_dir();
Path::new(matches.value_of("DEST_DIR").unwrap_or(&env_or_default)).to_path_buf()
}
fn install_sources_from_matches(matches: &ArgMatches) -> Result<Vec<InstallSource>> {
matches.values_of("PKG_IDENT_OR_ARTIFACT")
.unwrap() // Required via clap
.map(|t| t.parse().map_err(Error::from))
.collect()
}
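/// Enables experimental features from `HAB_FEAT_*` environment variables; for
/// example, setting `HAB_FEAT_OFFLINE_INSTALL=true` turns on the offline
/// install mode consulted by `sub_pkg_install` above.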
fn enable_features_from_env(ui: &mut UI) {
let features = vec![
(feat::List, "LIST"),
(feat::OfflineInstall, "OFFLINE_INSTALL"),
];
for feature in &features {
match henv::var(format!("HAB_FEAT_{}", feature.1)) {
Ok(ref val) if ["true", "TRUE"].contains(&val.as_str()) => {
feat::enable(feature.0);
ui.warn(&format!("Enabling feature: {:?}", feature.0))
.unwrap();
}
_ => {}
}
}
if feat::is_enabled(feat::List) {
ui.warn("Listing feature flags environment variables:")
.unwrap();
for feature in &features {
ui.warn(&format!(" * {:?}: HAB_FEAT_{}=true", feature.0, feature.1))
.unwrap();
}
}
}
| 35.970348 | 100 | 0.561642 |
3849a9ac4440f8d7ec452dc96de9d97015388fec | 1,305 | use std::fmt::{self, Debug};
mod wasm;
pub use wasm::{WasmBrTable, WasmJump};
mod unresolved;
pub use unresolved::UnresolvedJump;
use super::BlockNum;
/// Represents a `Jump` in the code.
///
/// In Wasm there are a couple of branching instructions: `br / br_if / br_table`.
/// A branch instruction can result in `UMP-ing to other locations in the code. Without going here about the nuances of each branching instruction,
/// we want to draw a `Jump`-edges in the CFG between possible jumps. We are able to do that since there is no arbitrary `goto`(s) in Wasm.
/// The control-flow is structured and we can determine the targets of each branch.
///
/// Note: we treat `return` and `unreachable` the same as branch. We look at it as "jumping" out of the function.
/// This plays well with our design, and that's the reason why we reserve `Scope #0` to the function's entry.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Jump {
/// The `origin`'s `BlockNum`
pub origin: BlockNum,
/// The `target`'s `BlockNum`
pub target: BlockNum,
}
impl Jump {
/// Returns the jump's `origin` Block
pub fn origin(&self) -> BlockNum {
self.origin
}
/// Returns the jump's `target` Block
pub fn target(&self) -> BlockNum {
self.target
}
}
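// Illustrative sketch (not part of the original source): both fields are
// public, so a `Jump` edge can be built directly from two block numbers that
// the CFG builder already produced.
#[allow(dead_code)]
fn example_edge(origin: BlockNum, target: BlockNum) -> Jump {
    // Records that `origin` may transfer control to `target`.
    Jump { origin, target }
}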
| 32.625 | 147 | 0.678161 |
9c1fe6c513e9380e2b43e189e82223ea04b89629 | 3,699 | mod common;
// app(socks) -> (socks)client(chain(chain(ws+trojan)+shadowsocks)) -> (chain(ws+trojan))server1(direct) -> (shadowsocks)server2(direct) -> echo
#[cfg(all(
feature = "outbound-socks",
feature = "inbound-socks",
feature = "outbound-ws",
feature = "outbound-trojan",
feature = "inbound-ws",
feature = "inbound-trojan",
feature = "outbound-shadowsocks",
feature = "inbound-shadowsocks",
feature = "outbound-direct",
feature = "inbound-chain",
feature = "outbound-chain",
))]
#[test]
fn test_out_chain_3() {
let config1 = r#"
{
"inbounds": [
{
"protocol": "socks",
"address": "127.0.0.1",
"port": 1086
}
],
"outbounds": [
{
"protocol": "chain",
"tag": "chain-server1-server2",
"settings": {
"actors": [
"server1",
"server2"
]
}
},
{
"protocol": "chain",
"tag": "server1",
"settings": {
"actors": [
"server1-ws",
"server1-trojan"
]
}
},
{
"protocol": "ws",
"tag": "server1-ws",
"settings": {
"path": "/leaf"
}
},
{
"protocol": "trojan",
"tag": "server1-trojan",
"settings": {
"address": "127.0.0.1",
"port": 3001,
"password": "password"
}
},
{
"protocol": "shadowsocks",
"tag": "server2",
"settings": {
"address": "127.0.0.1",
"port": 3002,
"method": "aes-128-gcm",
"password": "password"
}
}
]
}
"#;
let config2 = r#"
{
"inbounds": [
{
"protocol": "chain",
"tag": "server1",
"address": "127.0.0.1",
"port": 3001,
"settings": {
"actors": [
"ws",
"trojan"
]
}
},
{
"protocol": "ws",
"tag": "ws",
"settings": {
"path": "/leaf"
}
},
{
"protocol": "trojan",
"tag": "trojan",
"settings": {
"passwords": [
"password"
]
}
}
],
"outbounds": [
{
"protocol": "direct"
}
]
}
"#;
let config3 = r#"
{
"inbounds": [
{
"protocol": "shadowsocks",
"address": "127.0.0.1",
"port": 3002,
"settings": {
"method": "aes-128-gcm",
"password": "password"
}
}
],
"outbounds": [
{
"protocol": "direct"
}
]
}
"#;
let configs = vec![
config1.to_string(),
config2.to_string(),
config3.to_string(),
];
common::test_configs(configs, "127.0.0.1", 1086);
}
| 25.163265 | 144 | 0.311706 |
6a95e0f4a09838fe19e20071e6a6a312edf057c8 | 424 | use rand::Rng;
/// Generate a vector of the given size, filled with random values
///
/// # Example
/// ```
/// use rdp::model::rnd::random;
/// let vector = random(128);
/// assert_eq!(vector.len(), 128);
/// ```
pub fn random(size: usize) -> Vec<u8> {
let mut rng = rand::thread_rng();
(0..size).map(|_| rng.gen()).collect()
}
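/// Generate a random non-zero 32-bit nonce.
///
/// Doc sketch added for illustration; the module path in the example is
/// assumed from the `random` example above. Per `gen_range(1, 0x7fffffff)`
/// below, the result is inclusive of 1 and exclusive of 0x7fffffff.
///
/// # Example
/// ```
/// use rdp::model::rnd::nonce;
/// let n = nonce();
/// assert!(n >= 1 && n < 0x7fffffff);
/// ```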
pub fn nonce() -> u32 {
let mut rng = rand::thread_rng();
rng.gen_range(1, 0x7fffffff)
} | 22.315789 | 55 | 0.584906 |
ab27165c0546c11227d55737422390a5e423c89b | 16,751 | /*
* Copyright 2019 Google LLC
* Copyright 2021 Cube Dev, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::difference_encoding::{DifferenceDecoder, DifferenceEncoder};
use crate::encoding::SparseEncoding;
use crate::normal::NormalRepresentation;
use crate::state::State;
use crate::Result;
use crate::ZetaError;
use std::cmp::min;
use std::collections::BTreeSet;
#[derive(Debug, Clone)]
pub struct SparseRepresentation {
/**
* The maximum number of bytes that the `State::sparse_data` may contain before we upgrade to
* normal. See `MAXIMUM_SPARSE_DATA_FRACTION` for more details.
*/
max_sparse_data_bytes: u32,
/** Helper object for encoding and decoding individual sparse values. */
encoding: SparseEncoding,
/**
* A buffer of integers which should be merged into the difference encoded [sparse_data]. The
* sparse representation in [sparse_data] is more space efficient but also slower to read and
* write to, so this buffer allows us to quickly return when adding new values.
*
     * The original implementation uses a vector here; we choose a BTreeSet to improve merge
     * times. This involves a higher memory footprint, but allows elements to be buffered very
     * efficiently during merges.
*/
buffer: BTreeSet<u32>,
/**
* The maximum number of elements that the [buffer] may contain before it is flushed into the
     * sparse [sparse_data] representation. See [MAXIMUM_BUFFER_ELEMENTS_FRACTION] for details on
* how this is computed.
*/
max_buffer_elements: u32,
}
impl SparseRepresentation {
/** The largest sparse precision supported by this implementation. */
const MAXIMUM_SPARSE_PRECISION: i32 = 25;
/**
* The maximum amount of encoded sparse data, relative to the normal representation size, before
* we upgrade to normal.
*
* Note that, while some implementations also take into consideration the size of the temporary
* (in-memory) `buffer`, we define this field only relative to the normal representation
* size as the golden tests verify that representations are upgraded consistently (relative to the
* on-disk size). This allows us to fine-tune the size of the temporary `buffer`
* independently (e.g. improving runtime performance while trading off for peak memory usage).
*/
const MAXIMUM_SPARSE_DATA_FRACTION: f32 = 0.75;
/**
* The maximum amount of elements that the temporary `buffer` may contain before it is
* flushed, relative to the number of bytes that the data in the normal representation would
* require.
*
* The thinking for this is as follows: If the number of bytes that the normal representation
* would occupy is `m`, then the maximum number of bytes that the encoded sparse data can
* occupy is `0.75m` (see [MAXIMUM_SPARSE_DATA_FRACTION] above). This leaves `0.25m = m/4` bytes
* of memory that the temporary buffer can use before the overall in-memory
* footprint of the sparse representation exceeds that of the normal representation. Since each
* element in the buffer requires 4 bytes (32-bit integers), we can at most keep `m/16` elements
* before we exceed the in-memory footprint of the normal representation data.
*
* Now the problem is that writing and reading the difference encoded data is CPU expensive (it
* is by far the limiting factor for sparse adds and merges) so there is a tradeoff between the
* memory footprint and the CPU cost.
* For this reason, we add a correction factor that allows the sparse representation to use a bit
* more memory and thereby greatly increases the speed of adds and merges.
*
     * A value of `4` was chosen for consistency with a legacy HLL++ implementation, but this
* is something to be evaluated critically.
*
* This results in a final elements to bytes ratio of `4 * m/16 = m/4`. This means that the
* sparse representation can (in the worst case) use 1.75x the amount of RAM than the normal
* representation would. It will always use less than [MAXIMUM_SPARSE_DATA_FRACTION] times the
* amount of space on disk, however.
*/
const MAXIMUM_BUFFER_ELEMENTS_FRACTION: f32 = 1. - Self::MAXIMUM_SPARSE_DATA_FRACTION;
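    // Worked example (illustrative): for precision p = 15 the normal
    // representation occupies m = 2^15 = 32768 bytes, so
    // max_sparse_data_bytes = 0.75 * 32768 = 24576 and
    // max_buffer_elements = 0.25 * 32768 = 8192. Each buffered element is a
    // 4-byte u32, i.e. up to 32768 bytes of buffer, which is the 1.75x
    // worst case described above.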
pub fn new(state: &State) -> Result<SparseRepresentation> {
Self::check_precision(state.precision, state.sparse_precision)?;
let encoding = SparseEncoding::new(state.precision, state.sparse_precision);
// Compute size limits for the encoded sparse data and temporary buffer relative to what the
// normal representation would require (which is 2^p bytes).
if !(state.precision < 31) {
return Err(ZetaError::new(format!(
"expected precision < 31, got {}",
state.precision
)));
};
let m = 1 << state.precision;
let max_sparse_data_bytes = (m as f32 * Self::MAXIMUM_SPARSE_DATA_FRACTION) as u32;
        if max_sparse_data_bytes == 0 {
return Err(ZetaError::new(format!(
"max_sparse_data_bytes must be > 0, got {}",
max_sparse_data_bytes
)));
}
let max_buffer_elements = (m as f32 * Self::MAXIMUM_BUFFER_ELEMENTS_FRACTION) as u32;
        if max_buffer_elements == 0 {
return Err(ZetaError::new(format!(
"max_buffer_elements must be > 0, got {}",
max_buffer_elements
)));
}
// We have no good way of checking whether the data actually contains the given number of
// elements without decoding the data, which would be inefficient here.
return Ok(SparseRepresentation {
max_sparse_data_bytes,
encoding,
max_buffer_elements,
buffer: BTreeSet::new(),
});
}
pub fn encoding(&self) -> &SparseEncoding {
return &self.encoding;
}
fn check_precision(normal_precision: i32, sparse_precision: i32) -> Result<()> {
NormalRepresentation::check_precision(normal_precision)?;
if !(normal_precision <= sparse_precision
&& sparse_precision <= Self::MAXIMUM_SPARSE_PRECISION)
{
return Err(ZetaError::new(format!(
"Expected sparse precision to be >= normal precision ({}) and <= {}, but was {}.",
normal_precision,
Self::MAXIMUM_SPARSE_PRECISION,
sparse_precision
)));
}
return Ok(());
}
pub fn cardinality(&mut self, state: &mut State) -> u64 {
// This is the only place that panics instead of returning errors.
// TODO: we should either (1) panic everywhere or (2) return an error here.
self.flush_buffer(state).expect("could not flush buffer");
// Linear counting over the number of empty sparse buckets.
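        // Worked example (illustrative): with sparse precision 20 there are
        // 2^20 = 1,048,576 buckets; if 1,000 of them are occupied, the
        // estimate is 2^20 * ln(2^20 / (2^20 - 1000)) ~= 1000.5, close to the
        // true count while compensating for hash collisions between values.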
let buckets = 1 << state.sparse_precision;
let num_zeros = buckets - state.sparse_size;
let estimate = buckets as f64 * (buckets as f64 / num_zeros as f64).ln();
return estimate.round() as u64;
}
/// `self` may end up be in the invalid state on error and must not be used further.
pub fn merge_with_sparse(
&mut self,
state: &mut State,
other: &SparseRepresentation,
other_state: &State,
) -> Result<Option<NormalRepresentation>> {
// TODO: Add special case when 'this' is empty and 'other' has only encoded data.
// In that case, we can just copy over the sparse data without needing to decode and dedupe.
return self.add_sparse_values(state, other, other_state);
}
#[must_use]
pub fn merge_with_normal(
&mut self,
state: &mut State,
other: &NormalRepresentation,
other_state: &State,
) -> Result<Option<NormalRepresentation>> {
let mut normal = self.normalize(state)?;
normal.merge_with_normal(state, other, other_state);
return Ok(Some(normal));
}
fn add_sparse_values(
&mut self,
state: &mut State,
other: &SparseRepresentation,
other_state: &State,
) -> Result<Option<NormalRepresentation>> {
self.encoding.assert_compatible(&other.encoding);
if !other.buffer.is_empty() {
self.buffer.extend(other.buffer.iter())
}
if other_state.sparse_size < 0 {
return Err(ZetaError::new(format!(
"negative sparse_size: {}",
other_state.sparse_size
)));
}
if (other_state.sparse_size as u32) < self.max_buffer_elements {
for e in Self::sorted_iterator(other_state.sparse_data.as_deref()) {
let e = e?;
self.buffer.insert(e);
}
} else {
// Special case when encodings are the same. Then we can profit from the fact that
// sparse_values are sorted (as defined in the add_sparse_values contract) and do a
// merge-join.
self.flush_buffer(state)?;
let self_data = state.sparse_data.take();
self.merge_and_set(
state,
Self::sorted_iterator(self_data.as_deref()),
Self::sorted_iterator(other_state.sparse_data.as_deref()),
)?;
}
// TODO: Merge without risking to grow this representation above its maximum size.
return Ok(self.update_representation(state)?);
}
fn merge_and_set<Iter1, Iter2>(
&self,
state: &mut State,
mut l: Iter1,
mut r: Iter2,
) -> Result<()>
where
Iter1: Iterator<Item = Result<u32>>,
Iter2: Iterator<Item = Result<u32>>,
{
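        // Merge-join over two sorted streams of encoded sparse values. The
        // write of `last` is deferred until its decoded sparse index changes,
        // so only the final (numerically largest) encoding per index is kept.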
let mut data = Vec::new();
struct MergeState<'a> {
encoder: DifferenceEncoder<'a>,
size: i32,
}
impl MergeState<'_> {
fn put_int(&mut self, v: u32) {
self.encoder.put_int(v);
self.size += 1;
}
fn consume<Iter: Iterator<Item = Result<u32>>>(&mut self, mut it: Iter) -> Result<()> {
while let Some(v) = it.next().transpose()? {
self.encoder.put_int(v);
self.size += 1;
}
Ok(())
}
}
let mut s = MergeState {
encoder: DifferenceEncoder::new(&mut data),
size: 0,
};
// First iteration.
let (mut lv, mut rv) = match (l.next().transpose()?, r.next().transpose()?) {
(None, None) => {
let size = s.size;
return Self::set_sparse(state, data, size);
}
(Some(v), None) => {
s.put_int(v);
s.consume(l)?;
let size = s.size;
return Self::set_sparse(state, data, size);
}
(None, Some(v)) => {
s.put_int(v);
s.consume(r)?;
let size = s.size;
return Self::set_sparse(state, data, size);
}
(Some(lv), Some(rv)) => (lv, rv),
};
let mut last = min(lv, rv);
let mut last_index = self.encoding.decode_sparse_index(last as i32);
loop {
let next = min(lv, rv);
let next_index = self.encoding.decode_sparse_index(next as i32);
if last_index != next_index {
s.put_int(last)
}
last = next;
last_index = next_index;
if lv < rv {
match l.next().transpose()? {
Some(v) => lv = v,
None => {
if self.encoding.decode_sparse_index(rv as i32) != last_index {
s.put_int(last)
}
s.put_int(rv);
s.consume(r)?;
break;
}
}
} else {
match r.next().transpose()? {
Some(v) => rv = v,
None => {
if self.encoding.decode_sparse_index(lv as i32) != last_index {
s.put_int(last)
}
s.put_int(lv);
s.consume(l)?;
break;
}
}
}
}
let size = s.size;
return Self::set_sparse(state, data, size);
}
fn set_sparse(state: &mut State, data: Vec<u8>, size: i32) -> Result<()> {
state.sparse_data = Some(data);
state.sparse_size = size;
Ok(())
}
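    /// Decodes the difference-encoded bytes back into the original sorted
    /// `u32` values; yields nothing when no sparse data is present.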
pub(crate) fn sorted_iterator(sparse_data: Option<&[u8]>) -> DifferenceDecoder {
return DifferenceDecoder::new(sparse_data.unwrap_or(&[]));
}
fn buffer_iterator<'a>(&'a self) -> impl Iterator<Item = Result<u32>> + 'a {
self.buffer.iter().map(|v| Ok(*v))
}
/// Updates the sparse representation:
/// - If the temporary list has become too large, serialize it into the sparse bytes
/// representation.
/// - If the sparse representation has become too large, converts to a `NormalRepresentation`.
///
/// Returns a new normal representation if this sparse representation has outgrown itself or
    /// `None` if the sparse representation can continue to be used.
#[must_use]
fn update_representation(&mut self, state: &mut State) -> Result<Option<NormalRepresentation>> {
if (self.max_buffer_elements as usize) < self.buffer.len() {
self.flush_buffer(state)?;
}
// Upgrade to normal if the sparse data exceeds the maximum allowed amount of memory.
//
// Note that sparse_data will allocate a larger buffer on the heap (of size
// sparse_data.capacity()) than is actually occupied by the sparse encoding (of size
// sparse_data.len()), since we cannot efficiently anticipate how many bytes will be
// written when flushing the buffer. So in principle, we would need to compare
// sparse_data.capacity() with max_sparse_data_bytes here if we wanted to make sure that we never
// use too much memory at runtime. This would not be compatible with golden tests, though, which
// ensure that the representation upgrades to normal just before the *serialized* sparse format
// uses more memory than max_sparse_data_bytes. I.e., we would be upgrading to normal
// representation earlier than the golden tests.
if state.sparse_data.is_some()
&& state.sparse_data.as_ref().unwrap().len() > self.max_sparse_data_bytes as usize
{
return Ok(Some(self.normalize(state)?));
}
return Ok(None);
}
/// Convert to `NormalRepresentation`.
#[must_use]
fn normalize(&mut self, state: &mut State) -> Result<NormalRepresentation> {
let mut representation = NormalRepresentation::new(state).expect("programming error");
let sparse_data = state.sparse_data.take();
state.sparse_size = 0;
representation.add_sparse_values(
state,
self.encoding(),
Self::sorted_iterator(sparse_data.as_deref()),
)?;
if !self.buffer.is_empty() {
representation.add_sparse_values(state, self.encoding(), self.buffer_iterator())?;
self.buffer.clear();
}
return Ok(representation);
}
pub fn requires_compaction(&self) -> bool {
!self.buffer.is_empty()
}
pub fn compact(&mut self, state: &mut State) -> Result<()> {
self.flush_buffer(state)
}
fn flush_buffer(&mut self, state: &mut State) -> Result<()> {
if self.buffer.is_empty() {
return Ok(());
}
let data = state.sparse_data.take();
self.merge_and_set(
state,
Self::sorted_iterator(data.as_deref()),
self.buffer_iterator(),
)?;
self.buffer.clear();
return Ok(());
}
}
| 40.559322 | 105 | 0.598114 |
4b4381e1b088a1344894f29d3d50676fa9f17814 | 106,637 | // @generated by Thrift for src/module.thrift
// This file is probably not the place you want to edit!
#![recursion_limit = "100000000"]
#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals, unused_crate_dependencies)]
pub use self::errors::*;
pub use self::types::*;
/// Thrift type definitions for `module`.
pub mod types {
#![allow(clippy::redundant_closure)]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Fiery {
pub message: ::std::string::String,
}
impl ::std::error::Error for Fiery {}
impl ::std::fmt::Display for Fiery {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Fiery: {}: {:?}", self.message, self)
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Serious {
pub sonnet: ::std::option::Option<::std::string::String>,
}
impl ::std::error::Error for Serious {}
impl ::std::fmt::Display for Serious {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "Serious: {:?}: {:?}", self.sonnet, self)
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ComplexFieldNames {
pub error_message: ::std::string::String,
pub internal_error_message: ::std::string::String,
}
impl ::std::error::Error for ComplexFieldNames {}
impl ::std::fmt::Display for ComplexFieldNames {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "ComplexFieldNames: {}: {:?}", self.internal_error_message, self)
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CustomFieldNames {
pub error_message: ::std::string::String,
pub internal_error_message: ::std::string::String,
}
impl ::std::error::Error for CustomFieldNames {}
impl ::std::fmt::Display for CustomFieldNames {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "CustomFieldNames: {}: {:?}", self.internal_error_message, self)
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ExceptionWithPrimitiveField {
pub message: ::std::string::String,
pub error_code: ::std::primitive::i32,
}
impl ::std::error::Error for ExceptionWithPrimitiveField {}
impl ::std::fmt::Display for ExceptionWithPrimitiveField {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "ExceptionWithPrimitiveField: {}: {:?}", self.message, self)
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Banal {
}
impl ::std::error::Error for Banal {}
impl ::std::fmt::Display for Banal {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl ::std::default::Default for self::Fiery {
fn default() -> Self {
Self {
message: ::std::default::Default::default(),
}
}
}
unsafe impl ::std::marker::Send for self::Fiery {}
unsafe impl ::std::marker::Sync for self::Fiery {}
impl ::fbthrift::GetTType for self::Fiery {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::Fiery
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("Fiery");
p.write_field_begin("message", ::fbthrift::TType::String, 1);
::fbthrift::Serialize::write(&self.message, p);
p.write_field_end();
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::Fiery
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("message", ::fbthrift::TType::String, 1),
];
let mut field_message = ::std::option::Option::None;
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(::fbthrift::TType::String, 1) => field_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
message: field_message.unwrap_or_default(),
})
}
}
impl ::std::default::Default for self::Serious {
fn default() -> Self {
Self {
sonnet: ::std::option::Option::None,
}
}
}
unsafe impl ::std::marker::Send for self::Serious {}
unsafe impl ::std::marker::Sync for self::Serious {}
impl ::fbthrift::GetTType for self::Serious {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::Serious
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("Serious");
if let ::std::option::Option::Some(some) = &self.sonnet {
p.write_field_begin("sonnet", ::fbthrift::TType::String, 1);
::fbthrift::Serialize::write(some, p);
p.write_field_end();
}
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::Serious
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("sonnet", ::fbthrift::TType::String, 1),
];
let mut field_sonnet = ::std::option::Option::None;
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(::fbthrift::TType::String, 1) => field_sonnet = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
sonnet: field_sonnet,
})
}
}
impl ::std::default::Default for self::ComplexFieldNames {
fn default() -> Self {
Self {
error_message: ::std::default::Default::default(),
internal_error_message: ::std::default::Default::default(),
}
}
}
unsafe impl ::std::marker::Send for self::ComplexFieldNames {}
unsafe impl ::std::marker::Sync for self::ComplexFieldNames {}
impl ::fbthrift::GetTType for self::ComplexFieldNames {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::ComplexFieldNames
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("ComplexFieldNames");
p.write_field_begin("error_message", ::fbthrift::TType::String, 1);
::fbthrift::Serialize::write(&self.error_message, p);
p.write_field_end();
p.write_field_begin("internal_error_message", ::fbthrift::TType::String, 2);
::fbthrift::Serialize::write(&self.internal_error_message, p);
p.write_field_end();
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::ComplexFieldNames
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("error_message", ::fbthrift::TType::String, 1),
::fbthrift::Field::new("internal_error_message", ::fbthrift::TType::String, 2),
];
let mut field_error_message = ::std::option::Option::None;
let mut field_internal_error_message = ::std::option::Option::None;
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(::fbthrift::TType::String, 1) => field_error_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(::fbthrift::TType::String, 2) => field_internal_error_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
error_message: field_error_message.unwrap_or_default(),
internal_error_message: field_internal_error_message.unwrap_or_default(),
})
}
}
impl ::std::default::Default for self::CustomFieldNames {
fn default() -> Self {
Self {
error_message: ::std::default::Default::default(),
internal_error_message: ::std::default::Default::default(),
}
}
}
unsafe impl ::std::marker::Send for self::CustomFieldNames {}
unsafe impl ::std::marker::Sync for self::CustomFieldNames {}
impl ::fbthrift::GetTType for self::CustomFieldNames {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::CustomFieldNames
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("CustomFieldNames");
p.write_field_begin("error_message", ::fbthrift::TType::String, 1);
::fbthrift::Serialize::write(&self.error_message, p);
p.write_field_end();
p.write_field_begin("internal_error_message", ::fbthrift::TType::String, 2);
::fbthrift::Serialize::write(&self.internal_error_message, p);
p.write_field_end();
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::CustomFieldNames
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("error_message", ::fbthrift::TType::String, 1),
::fbthrift::Field::new("internal_error_message", ::fbthrift::TType::String, 2),
];
let mut field_error_message = ::std::option::Option::None;
let mut field_internal_error_message = ::std::option::Option::None;
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(::fbthrift::TType::String, 1) => field_error_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(::fbthrift::TType::String, 2) => field_internal_error_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
error_message: field_error_message.unwrap_or_default(),
internal_error_message: field_internal_error_message.unwrap_or_default(),
})
}
}
impl ::std::default::Default for self::ExceptionWithPrimitiveField {
fn default() -> Self {
Self {
message: ::std::default::Default::default(),
error_code: ::std::default::Default::default(),
}
}
}
unsafe impl ::std::marker::Send for self::ExceptionWithPrimitiveField {}
unsafe impl ::std::marker::Sync for self::ExceptionWithPrimitiveField {}
impl ::fbthrift::GetTType for self::ExceptionWithPrimitiveField {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::ExceptionWithPrimitiveField
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("ExceptionWithPrimitiveField");
p.write_field_begin("message", ::fbthrift::TType::String, 1);
::fbthrift::Serialize::write(&self.message, p);
p.write_field_end();
p.write_field_begin("error_code", ::fbthrift::TType::I32, 2);
::fbthrift::Serialize::write(&self.error_code, p);
p.write_field_end();
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::ExceptionWithPrimitiveField
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("error_code", ::fbthrift::TType::I32, 2),
::fbthrift::Field::new("message", ::fbthrift::TType::String, 1),
];
let mut field_message = ::std::option::Option::None;
let mut field_error_code = ::std::option::Option::None;
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(::fbthrift::TType::String, 1) => field_message = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(::fbthrift::TType::I32, 2) => field_error_code = ::std::option::Option::Some(::fbthrift::Deserialize::read(p)?),
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
message: field_message.unwrap_or_default(),
error_code: field_error_code.unwrap_or_default(),
})
}
}
impl ::std::default::Default for self::Banal {
fn default() -> Self {
Self {
}
}
}
unsafe impl ::std::marker::Send for self::Banal {}
unsafe impl ::std::marker::Sync for self::Banal {}
impl ::fbthrift::GetTType for self::Banal {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for self::Banal
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("Banal");
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for self::Banal
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static FIELDS: &[::fbthrift::Field] = &[
];
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), FIELDS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(Self {
})
}
}
}
#[doc(hidden)]
pub mod dependencies {
}
pub mod services {
pub mod raiser {
#[derive(Clone, Debug)]
pub enum DoBlandExn {
Success(()),
ApplicationException(::fbthrift::ApplicationException),
}
impl ::std::convert::From<::fbthrift::ApplicationException> for DoBlandExn {
fn from(exn: ::fbthrift::ApplicationException) -> Self {
DoBlandExn::ApplicationException(exn)
}
}
impl ::fbthrift::GetTType for DoBlandExn {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for DoBlandExn
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("DoBland");
match self {
DoBlandExn::Success(inner) => {
p.write_field_begin(
"Success",
::fbthrift::TType::Void,
0i16,
);
inner.write(p);
p.write_field_end();
}
DoBlandExn::ApplicationException(_) => panic!(
"Bad union Alt field {} id {}",
"ApplicationException",
-2147483648i32,
),
}
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for DoBlandExn
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static RETURNS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("Success", ::fbthrift::TType::Void, 0),
];
let _ = p.read_struct_begin(|_| ())?;
let mut once = false;
let mut alt = DoBlandExn::Success(());
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), RETURNS)?;
match ((fty, fid as ::std::primitive::i32), once) {
((::fbthrift::TType::Stop, _), _) => {
p.read_field_end()?;
break;
}
((::fbthrift::TType::Void, 0i32), false) => {
once = true;
alt = DoBlandExn::Success(::fbthrift::Deserialize::read(p)?);
}
((ty, _id), false) => p.skip(ty)?,
((badty, badid), true) => return ::std::result::Result::Err(::std::convert::From::from(
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::ProtocolError,
format!(
"unwanted extra union {} field ty {:?} id {}",
"DoBlandExn",
badty,
badid,
),
)
)),
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(alt)
}
}
#[derive(Clone, Debug)]
pub enum DoRaiseExn {
Success(()),
b(crate::types::Banal),
f(crate::types::Fiery),
s(crate::types::Serious),
ApplicationException(::fbthrift::ApplicationException),
}
impl ::std::convert::From<crate::types::Banal> for DoRaiseExn {
fn from(exn: crate::types::Banal) -> Self {
DoRaiseExn::b(exn)
}
}
impl ::std::convert::From<crate::types::Fiery> for DoRaiseExn {
fn from(exn: crate::types::Fiery) -> Self {
DoRaiseExn::f(exn)
}
}
impl ::std::convert::From<crate::types::Serious> for DoRaiseExn {
fn from(exn: crate::types::Serious) -> Self {
DoRaiseExn::s(exn)
}
}
impl ::std::convert::From<::fbthrift::ApplicationException> for DoRaiseExn {
fn from(exn: ::fbthrift::ApplicationException) -> Self {
DoRaiseExn::ApplicationException(exn)
}
}
impl ::fbthrift::GetTType for DoRaiseExn {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for DoRaiseExn
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("DoRaise");
match self {
DoRaiseExn::Success(inner) => {
p.write_field_begin(
"Success",
::fbthrift::TType::Void,
0i16,
);
inner.write(p);
p.write_field_end();
}
DoRaiseExn::b(inner) => {
p.write_field_begin(
"b",
::fbthrift::TType::Struct,
1,
);
inner.write(p);
p.write_field_end();
}
DoRaiseExn::f(inner) => {
p.write_field_begin(
"f",
::fbthrift::TType::Struct,
2,
);
inner.write(p);
p.write_field_end();
}
DoRaiseExn::s(inner) => {
p.write_field_begin(
"s",
::fbthrift::TType::Struct,
3,
);
inner.write(p);
p.write_field_end();
}
DoRaiseExn::ApplicationException(_) => panic!(
"Bad union Alt field {} id {}",
"ApplicationException",
-2147483648i32,
),
}
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for DoRaiseExn
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static RETURNS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("Success", ::fbthrift::TType::Void, 0),
::fbthrift::Field::new("b", ::fbthrift::TType::Struct, 1),
::fbthrift::Field::new("f", ::fbthrift::TType::Struct, 2),
::fbthrift::Field::new("s", ::fbthrift::TType::Struct, 3),
];
let _ = p.read_struct_begin(|_| ())?;
let mut once = false;
let mut alt = DoRaiseExn::Success(());
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), RETURNS)?;
match ((fty, fid as ::std::primitive::i32), once) {
((::fbthrift::TType::Stop, _), _) => {
p.read_field_end()?;
break;
}
((::fbthrift::TType::Void, 0i32), false) => {
once = true;
alt = DoRaiseExn::Success(::fbthrift::Deserialize::read(p)?);
}
((::fbthrift::TType::Struct, 1), false) => {
once = true;
alt = DoRaiseExn::b(::fbthrift::Deserialize::read(p)?);
}
((::fbthrift::TType::Struct, 2), false) => {
once = true;
alt = DoRaiseExn::f(::fbthrift::Deserialize::read(p)?);
}
((::fbthrift::TType::Struct, 3), false) => {
once = true;
alt = DoRaiseExn::s(::fbthrift::Deserialize::read(p)?);
}
((ty, _id), false) => p.skip(ty)?,
((badty, badid), true) => return ::std::result::Result::Err(::std::convert::From::from(
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::ProtocolError,
format!(
"unwanted extra union {} field ty {:?} id {}",
"DoRaiseExn",
badty,
badid,
),
)
)),
}
p.read_field_end()?;
}
p.read_struct_end()?;
::std::result::Result::Ok(alt)
}
}
#[derive(Clone, Debug)]
pub enum Get200Exn {
Success(::std::string::String),
ApplicationException(::fbthrift::ApplicationException),
}
impl ::std::convert::From<::fbthrift::ApplicationException> for Get200Exn {
fn from(exn: ::fbthrift::ApplicationException) -> Self {
Get200Exn::ApplicationException(exn)
}
}
impl ::fbthrift::GetTType for Get200Exn {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for Get200Exn
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("Get200");
match self {
Get200Exn::Success(inner) => {
p.write_field_begin(
"Success",
::fbthrift::TType::String,
0i16,
);
inner.write(p);
p.write_field_end();
}
Get200Exn::ApplicationException(_) => panic!(
"Bad union Alt field {} id {}",
"ApplicationException",
-2147483648i32,
),
}
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for Get200Exn
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static RETURNS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("Success", ::fbthrift::TType::String, 0),
];
let _ = p.read_struct_begin(|_| ())?;
let mut once = false;
let mut alt = ::std::option::Option::None;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), RETURNS)?;
match ((fty, fid as ::std::primitive::i32), once) {
((::fbthrift::TType::Stop, _), _) => {
p.read_field_end()?;
break;
}
((::fbthrift::TType::String, 0i32), false) => {
once = true;
alt = ::std::option::Option::Some(Get200Exn::Success(::fbthrift::Deserialize::read(p)?));
}
((ty, _id), false) => p.skip(ty)?,
((badty, badid), true) => return ::std::result::Result::Err(::std::convert::From::from(
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::ProtocolError,
format!(
"unwanted extra union {} field ty {:?} id {}",
"Get200Exn",
badty,
badid,
),
)
)),
}
p.read_field_end()?;
}
p.read_struct_end()?;
alt.ok_or_else(||
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::MissingResult,
format!("Empty union {}", "Get200Exn"),
)
.into(),
)
}
}
#[derive(Clone, Debug)]
pub enum Get500Exn {
Success(::std::string::String),
f(crate::types::Fiery),
b(crate::types::Banal),
s(crate::types::Serious),
ApplicationException(::fbthrift::ApplicationException),
}
impl ::std::convert::From<crate::types::Fiery> for Get500Exn {
fn from(exn: crate::types::Fiery) -> Self {
Get500Exn::f(exn)
}
}
impl ::std::convert::From<crate::types::Banal> for Get500Exn {
fn from(exn: crate::types::Banal) -> Self {
Get500Exn::b(exn)
}
}
impl ::std::convert::From<crate::types::Serious> for Get500Exn {
fn from(exn: crate::types::Serious) -> Self {
Get500Exn::s(exn)
}
}
impl ::std::convert::From<::fbthrift::ApplicationException> for Get500Exn {
fn from(exn: ::fbthrift::ApplicationException) -> Self {
Get500Exn::ApplicationException(exn)
}
}
impl ::fbthrift::GetTType for Get500Exn {
const TTYPE: ::fbthrift::TType = ::fbthrift::TType::Struct;
}
impl<P> ::fbthrift::Serialize<P> for Get500Exn
where
P: ::fbthrift::ProtocolWriter,
{
fn write(&self, p: &mut P) {
p.write_struct_begin("Get500");
match self {
Get500Exn::Success(inner) => {
p.write_field_begin(
"Success",
::fbthrift::TType::String,
0i16,
);
inner.write(p);
p.write_field_end();
}
Get500Exn::f(inner) => {
p.write_field_begin(
"f",
::fbthrift::TType::Struct,
1,
);
inner.write(p);
p.write_field_end();
}
Get500Exn::b(inner) => {
p.write_field_begin(
"b",
::fbthrift::TType::Struct,
2,
);
inner.write(p);
p.write_field_end();
}
Get500Exn::s(inner) => {
p.write_field_begin(
"s",
::fbthrift::TType::Struct,
3,
);
inner.write(p);
p.write_field_end();
}
Get500Exn::ApplicationException(_) => panic!(
"Bad union Alt field {} id {}",
"ApplicationException",
-2147483648i32,
),
}
p.write_field_stop();
p.write_struct_end();
}
}
impl<P> ::fbthrift::Deserialize<P> for Get500Exn
where
P: ::fbthrift::ProtocolReader,
{
fn read(p: &mut P) -> ::anyhow::Result<Self> {
static RETURNS: &[::fbthrift::Field] = &[
::fbthrift::Field::new("Success", ::fbthrift::TType::String, 0),
::fbthrift::Field::new("b", ::fbthrift::TType::Struct, 2),
::fbthrift::Field::new("f", ::fbthrift::TType::Struct, 1),
::fbthrift::Field::new("s", ::fbthrift::TType::Struct, 3),
];
let _ = p.read_struct_begin(|_| ())?;
let mut once = false;
let mut alt = ::std::option::Option::None;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), RETURNS)?;
match ((fty, fid as ::std::primitive::i32), once) {
((::fbthrift::TType::Stop, _), _) => {
p.read_field_end()?;
break;
}
((::fbthrift::TType::String, 0i32), false) => {
once = true;
alt = ::std::option::Option::Some(Get500Exn::Success(::fbthrift::Deserialize::read(p)?));
}
((::fbthrift::TType::Struct, 1), false) => {
once = true;
alt = ::std::option::Option::Some(Get500Exn::f(::fbthrift::Deserialize::read(p)?));
}
((::fbthrift::TType::Struct, 2), false) => {
once = true;
alt = ::std::option::Option::Some(Get500Exn::b(::fbthrift::Deserialize::read(p)?));
}
((::fbthrift::TType::Struct, 3), false) => {
once = true;
alt = ::std::option::Option::Some(Get500Exn::s(::fbthrift::Deserialize::read(p)?));
}
((ty, _id), false) => p.skip(ty)?,
((badty, badid), true) => return ::std::result::Result::Err(::std::convert::From::from(
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::ProtocolError,
format!(
"unwanted extra union {} field ty {:?} id {}",
"Get500Exn",
badty,
badid,
),
)
)),
}
p.read_field_end()?;
}
p.read_struct_end()?;
alt.ok_or_else(||
::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::MissingResult,
format!("Empty union {}", "Get500Exn"),
)
.into(),
)
}
}
}
}
/// Client implementation for each service in `module`.
pub mod client {
pub struct RaiserImpl<P, T> {
transport: T,
_phantom: ::std::marker::PhantomData<fn() -> P>,
}
impl<P, T> RaiserImpl<P, T> {
pub fn new(
transport: T,
) -> Self {
Self {
transport,
_phantom: ::std::marker::PhantomData,
}
}
pub fn transport(&self) -> &T {
&self.transport
}
}
pub trait Raiser: ::std::marker::Send {
fn doBland(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoBlandError>> + ::std::marker::Send + 'static>>;
fn doRaise(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoRaiseError>> + ::std::marker::Send + 'static>>;
fn get200(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get200Error>> + ::std::marker::Send + 'static>>;
fn get500(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get500Error>> + ::std::marker::Send + 'static>>;
}
impl<P, T> Raiser for RaiserImpl<P, T>
where
P: ::fbthrift::Protocol,
T: ::fbthrift::Transport,
P::Frame: ::fbthrift::Framing<DecBuf = ::fbthrift::FramingDecoded<T>>,
::fbthrift::ProtocolEncoded<P>: ::fbthrift::BufMutExt<Final = ::fbthrift::FramingEncodedFinal<T>>,
P::Deserializer: ::std::marker::Send,
{ fn doBland(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoBlandError>> + ::std::marker::Send + 'static>> {
use ::const_cstr::const_cstr;
use ::fbthrift::{ProtocolWriter as _};
use ::futures::future::{FutureExt as _, TryFutureExt as _};
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.doBland";
}
let request = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"doBland",
::fbthrift::MessageType::Call,
// Note: we send a 0 message sequence ID from clients because
// this field should not be used by the server (except for some
// language implementations).
0,
|p| {
p.write_struct_begin("args");
p.write_field_stop();
p.write_struct_end();
},
));
self.transport()
.call(SERVICE_NAME, METHOD_NAME, request)
.map_err(::std::convert::From::from)
.and_then(|reply| {
let de = P::deserializer(reply);
(move |mut p: P::Deserializer| {
use ::fbthrift::{ProtocolReader as _};
let (_, message_type, _) = match p.read_message_begin(|_| ()) {
Ok(res) => res,
Err(e) => return ::futures::future::Either::Left(
::futures::future::ready(
::std::result::Result::Err(e.into())
)
)
};
match message_type {
::fbthrift::MessageType::Reply => {
let exn: ::tokio_shim::task::JoinHandle<(Result<crate::services::raiser::DoBlandExn, _>, _)> = ::tokio_shim::task::spawn_blocking_fallback_inline(move || {
(::fbthrift::Deserialize::read(&mut p), p)
});
::futures::future::Either::Right(exn.then(
|exn| {
let result = (move || {
let (exn, mut p) = match exn {
Ok(res) => res,
Err(e) => {
// spawn_blocking threads can't be cancelled, so any
// error is a panic. This shouldn't happen, but we propagate if it does
::std::panic::resume_unwind(e.into_panic())
}
};
let exn = exn?;
let result = match exn {
crate::services::raiser::DoBlandExn::Success(x) => ::std::result::Result::Ok(x),
crate::services::raiser::DoBlandExn::ApplicationException(ae) => {
::std::result::Result::Err(crate::errors::raiser::DoBlandError::ApplicationException(ae))
}
};
p.read_message_end()?;
result
})();
::futures::future::ready(result)
}
))
}
::fbthrift::MessageType::Exception => {
let ae: ::std::result::Result<::fbthrift::ApplicationException, _> = ::fbthrift::Deserialize::read(&mut p);
::futures::future::Either::Left(
::futures::future::ready(
ae.map_err(|e| e.into()).and_then(|ae| {
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::DoBlandError::ApplicationException(ae))
}
)
})
)
)
}
::fbthrift::MessageType::Call | ::fbthrift::MessageType::Oneway | ::fbthrift::MessageType::InvalidMessageType => {
let err = ::anyhow::anyhow!("Unexpected message type {:?}", message_type);
::futures::future::Either::Left(
::futures::future::ready(
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::DoBlandError::ThriftError(err))
}
)
)
)
}
}
})(de)
})
.boxed()
}
fn doRaise(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoRaiseError>> + ::std::marker::Send + 'static>> {
use ::const_cstr::const_cstr;
use ::fbthrift::{ProtocolWriter as _};
use ::futures::future::{FutureExt as _, TryFutureExt as _};
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.doRaise";
}
let request = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"doRaise",
::fbthrift::MessageType::Call,
// Note: we send a 0 message sequence ID from clients because
// this field should not be used by the server (except for some
// language implementations).
0,
|p| {
p.write_struct_begin("args");
p.write_field_stop();
p.write_struct_end();
},
));
self.transport()
.call(SERVICE_NAME, METHOD_NAME, request)
.map_err(::std::convert::From::from)
.and_then(|reply| {
let de = P::deserializer(reply);
(move |mut p: P::Deserializer| {
use ::fbthrift::{ProtocolReader as _};
let (_, message_type, _) = match p.read_message_begin(|_| ()) {
Ok(res) => res,
Err(e) => return ::futures::future::Either::Left(
::futures::future::ready(
::std::result::Result::Err(e.into())
)
)
};
match message_type {
::fbthrift::MessageType::Reply => {
let exn: ::tokio_shim::task::JoinHandle<(Result<crate::services::raiser::DoRaiseExn, _>, _)> = ::tokio_shim::task::spawn_blocking_fallback_inline(move || {
(::fbthrift::Deserialize::read(&mut p), p)
});
::futures::future::Either::Right(exn.then(
|exn| {
let result = (move || {
let (exn, mut p) = match exn {
Ok(res) => res,
Err(e) => {
// spawn_blocking threads can't be cancelled, so any
// error is a panic. This shouldn't happen, but we propagate if it does
::std::panic::resume_unwind(e.into_panic())
}
};
let exn = exn?;
let result = match exn {
crate::services::raiser::DoRaiseExn::Success(x) => ::std::result::Result::Ok(x),
crate::services::raiser::DoRaiseExn::b(err) => {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::b(err))
}
crate::services::raiser::DoRaiseExn::f(err) => {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::f(err))
}
crate::services::raiser::DoRaiseExn::s(err) => {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::s(err))
}
crate::services::raiser::DoRaiseExn::ApplicationException(ae) => {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::ApplicationException(ae))
}
};
p.read_message_end()?;
result
})();
::futures::future::ready(result)
}
))
}
::fbthrift::MessageType::Exception => {
let ae: ::std::result::Result<::fbthrift::ApplicationException, _> = ::fbthrift::Deserialize::read(&mut p);
::futures::future::Either::Left(
::futures::future::ready(
ae.map_err(|e| e.into()).and_then(|ae| {
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::ApplicationException(ae))
}
)
})
)
)
}
::fbthrift::MessageType::Call | ::fbthrift::MessageType::Oneway | ::fbthrift::MessageType::InvalidMessageType => {
let err = ::anyhow::anyhow!("Unexpected message type {:?}", message_type);
::futures::future::Either::Left(
::futures::future::ready(
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::DoRaiseError::ThriftError(err))
}
)
)
)
}
}
})(de)
})
.boxed()
}
fn get200(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get200Error>> + ::std::marker::Send + 'static>> {
use ::const_cstr::const_cstr;
use ::fbthrift::{ProtocolWriter as _};
use ::futures::future::{FutureExt as _, TryFutureExt as _};
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.get200";
}
let request = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"get200",
::fbthrift::MessageType::Call,
// Note: we send a 0 message sequence ID from clients because
// this field should not be used by the server (except for some
// language implementations).
0,
|p| {
p.write_struct_begin("args");
p.write_field_stop();
p.write_struct_end();
},
));
self.transport()
.call(SERVICE_NAME, METHOD_NAME, request)
.map_err(::std::convert::From::from)
.and_then(|reply| {
let de = P::deserializer(reply);
(move |mut p: P::Deserializer| {
use ::fbthrift::{ProtocolReader as _};
let (_, message_type, _) = match p.read_message_begin(|_| ()) {
Ok(res) => res,
Err(e) => return ::futures::future::Either::Left(
::futures::future::ready(
::std::result::Result::Err(e.into())
)
)
};
match message_type {
::fbthrift::MessageType::Reply => {
let exn: ::tokio_shim::task::JoinHandle<(Result<crate::services::raiser::Get200Exn, _>, _)> = ::tokio_shim::task::spawn_blocking_fallback_inline(move || {
(::fbthrift::Deserialize::read(&mut p), p)
});
::futures::future::Either::Right(exn.then(
|exn| {
let result = (move || {
let (exn, mut p) = match exn {
Ok(res) => res,
Err(e) => {
// spawn_blocking threads can't be cancelled, so any
// error is a panic. This shouldn't happen, but we propagate if it does
::std::panic::resume_unwind(e.into_panic())
}
};
let exn = exn?;
let result = match exn {
crate::services::raiser::Get200Exn::Success(x) => ::std::result::Result::Ok(x),
crate::services::raiser::Get200Exn::ApplicationException(ae) => {
::std::result::Result::Err(crate::errors::raiser::Get200Error::ApplicationException(ae))
}
};
p.read_message_end()?;
result
})();
::futures::future::ready(result)
}
))
}
::fbthrift::MessageType::Exception => {
let ae: ::std::result::Result<::fbthrift::ApplicationException, _> = ::fbthrift::Deserialize::read(&mut p);
::futures::future::Either::Left(
::futures::future::ready(
ae.map_err(|e| e.into()).and_then(|ae| {
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::Get200Error::ApplicationException(ae))
}
)
})
)
)
}
::fbthrift::MessageType::Call | ::fbthrift::MessageType::Oneway | ::fbthrift::MessageType::InvalidMessageType => {
let err = ::anyhow::anyhow!("Unexpected message type {:?}", message_type);
::futures::future::Either::Left(
::futures::future::ready(
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::Get200Error::ThriftError(err))
}
)
)
)
}
}
})(de)
})
.boxed()
}
fn get500(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get500Error>> + ::std::marker::Send + 'static>> {
use ::const_cstr::const_cstr;
use ::fbthrift::{ProtocolWriter as _};
use ::futures::future::{FutureExt as _, TryFutureExt as _};
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.get500";
}
let request = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"get500",
::fbthrift::MessageType::Call,
// Note: we send a 0 message sequence ID from clients because
// this field should not be used by the server (except for some
// language implementations).
0,
|p| {
p.write_struct_begin("args");
p.write_field_stop();
p.write_struct_end();
},
));
self.transport()
.call(SERVICE_NAME, METHOD_NAME, request)
.map_err(::std::convert::From::from)
.and_then(|reply| {
let de = P::deserializer(reply);
(move |mut p: P::Deserializer| {
use ::fbthrift::{ProtocolReader as _};
let (_, message_type, _) = match p.read_message_begin(|_| ()) {
Ok(res) => res,
Err(e) => return ::futures::future::Either::Left(
::futures::future::ready(
::std::result::Result::Err(e.into())
)
)
};
match message_type {
::fbthrift::MessageType::Reply => {
let exn: ::tokio_shim::task::JoinHandle<(Result<crate::services::raiser::Get500Exn, _>, _)> = ::tokio_shim::task::spawn_blocking_fallback_inline(move || {
(::fbthrift::Deserialize::read(&mut p), p)
});
::futures::future::Either::Right(exn.then(
|exn| {
let result = (move || {
let (exn, mut p) = match exn {
Ok(res) => res,
Err(e) => {
// spawn_blocking threads can't be cancelled, so any
// error is a panic. This shouldn't happen, but we propagate if it does
::std::panic::resume_unwind(e.into_panic())
}
};
let exn = exn?;
let result = match exn {
crate::services::raiser::Get500Exn::Success(x) => ::std::result::Result::Ok(x),
crate::services::raiser::Get500Exn::f(err) => {
::std::result::Result::Err(crate::errors::raiser::Get500Error::f(err))
}
crate::services::raiser::Get500Exn::b(err) => {
::std::result::Result::Err(crate::errors::raiser::Get500Error::b(err))
}
crate::services::raiser::Get500Exn::s(err) => {
::std::result::Result::Err(crate::errors::raiser::Get500Error::s(err))
}
crate::services::raiser::Get500Exn::ApplicationException(ae) => {
::std::result::Result::Err(crate::errors::raiser::Get500Error::ApplicationException(ae))
}
};
p.read_message_end()?;
result
})();
::futures::future::ready(result)
}
))
}
::fbthrift::MessageType::Exception => {
let ae: ::std::result::Result<::fbthrift::ApplicationException, _> = ::fbthrift::Deserialize::read(&mut p);
::futures::future::Either::Left(
::futures::future::ready(
ae.map_err(|e| e.into()).and_then(|ae| {
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::Get500Error::ApplicationException(ae))
}
)
})
)
)
}
::fbthrift::MessageType::Call | ::fbthrift::MessageType::Oneway | ::fbthrift::MessageType::InvalidMessageType => {
let err = ::anyhow::anyhow!("Unexpected message type {:?}", message_type);
::futures::future::Either::Left(
::futures::future::ready(
p.read_message_end().map_err(|e| e.into()).and_then(
|_| {
::std::result::Result::Err(crate::errors::raiser::Get500Error::ThriftError(err))
}
)
)
)
}
}
})(de)
})
.boxed()
}
}
impl<'a, T> Raiser for T
where
T: ::std::convert::AsRef<dyn Raiser + 'a>,
T: ::std::marker::Send,
{
fn doBland(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoBlandError>> + ::std::marker::Send + 'static>> {
self.as_ref().doBland(
)
}
fn doRaise(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoRaiseError>> + ::std::marker::Send + 'static>> {
self.as_ref().doRaise(
)
}
fn get200(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get200Error>> + ::std::marker::Send + 'static>> {
self.as_ref().get200(
)
}
fn get500(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get500Error>> + ::std::marker::Send + 'static>> {
self.as_ref().get500(
)
}
}
#[derive(Clone)]
pub struct make_Raiser;
    /// To be called by a user directly setting up a client. Avoids
    /// needing the ClientFactory trait in scope, and avoids the
    /// unidiomatic make_Trait name.
///
/// ```
/// # const _: &str = stringify! {
/// use bgs::client::BuckGraphService;
///
/// let protocol = BinaryProtocol::new();
/// let transport = HttpClient::new();
/// let client = <dyn BuckGraphService>::new(protocol, transport);
/// # };
/// ```
impl dyn Raiser {
pub fn new<P, T>(
protocol: P,
transport: T,
) -> ::std::sync::Arc<impl Raiser + ::std::marker::Send + 'static>
where
P: ::fbthrift::Protocol<Frame = T>,
T: ::fbthrift::Transport,
P::Deserializer: ::std::marker::Send,
{
let _ = protocol;
::std::sync::Arc::new(RaiserImpl::<P, T>::new(transport))
}
}
pub type RaiserDynClient = <make_Raiser as ::fbthrift::ClientFactory>::Api;
pub type RaiserClient = ::std::sync::Arc<RaiserDynClient>;
/// The same thing, but to be called from generic contexts where we are
/// working with a type parameter `C: ClientFactory` to produce clients.
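    ///
    /// A hypothetical sketch of such a generic context (`protocol` and
    /// `transport` are assumed to be constructed elsewhere; the bounds mirror
    /// those on the impl below):
    ///
    /// ```
    /// # const _: &str = stringify! {
    /// fn build<C, P, T>(protocol: P, transport: T) -> ::std::sync::Arc<C::Api>
    /// where
    ///     C: ::fbthrift::ClientFactory,
    ///     P: ::fbthrift::Protocol<Frame = T>,
    ///     T: ::fbthrift::Transport + ::std::marker::Sync,
    ///     P::Deserializer: ::std::marker::Send,
    /// {
    ///     C::new(protocol, transport)
    /// }
    ///
    /// let raiser = build::<make_Raiser, _, _>(protocol, transport);
    /// # };
    /// ```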
impl ::fbthrift::ClientFactory for make_Raiser {
type Api = dyn Raiser + ::std::marker::Send + ::std::marker::Sync + 'static;
fn new<P, T>(protocol: P, transport: T) -> ::std::sync::Arc<Self::Api>
where
P: ::fbthrift::Protocol<Frame = T>,
T: ::fbthrift::Transport + ::std::marker::Sync,
P::Deserializer: ::std::marker::Send,
{
<dyn Raiser>::new(protocol, transport)
}
}
}
/// Server definitions for `module`.
pub mod server {
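    /// Handler trait for the Raiser service. Implement the methods you
    /// actually serve; each default implementation replies with an
    /// "unimplemented method" ApplicationException.
    ///
    /// A hypothetical handler sketch (the `MyRaiser` type is a placeholder):
    ///
    /// ```
    /// # const _: &str = stringify! {
    /// struct MyRaiser;
    ///
    /// #[::async_trait::async_trait]
    /// impl Raiser for MyRaiser {
    ///     async fn get200(
    ///         &self,
    ///     ) -> ::std::result::Result<::std::string::String, crate::services::raiser::Get200Exn> {
    ///         ::std::result::Result::Ok("200".to_owned())
    ///     }
    /// }
    /// # };
    /// ```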
#[::async_trait::async_trait]
pub trait Raiser: ::std::marker::Send + ::std::marker::Sync + 'static {
async fn doBland(
&self,
) -> ::std::result::Result<(), crate::services::raiser::DoBlandExn> {
::std::result::Result::Err(crate::services::raiser::DoBlandExn::ApplicationException(
::fbthrift::ApplicationException::unimplemented_method(
"Raiser",
"doBland",
),
))
}
async fn doRaise(
&self,
) -> ::std::result::Result<(), crate::services::raiser::DoRaiseExn> {
::std::result::Result::Err(crate::services::raiser::DoRaiseExn::ApplicationException(
::fbthrift::ApplicationException::unimplemented_method(
"Raiser",
"doRaise",
),
))
}
async fn get200(
&self,
) -> ::std::result::Result<::std::string::String, crate::services::raiser::Get200Exn> {
::std::result::Result::Err(crate::services::raiser::Get200Exn::ApplicationException(
::fbthrift::ApplicationException::unimplemented_method(
"Raiser",
"get200",
),
))
}
async fn get500(
&self,
) -> ::std::result::Result<::std::string::String, crate::services::raiser::Get500Exn> {
::std::result::Result::Err(crate::services::raiser::Get500Exn::ApplicationException(
::fbthrift::ApplicationException::unimplemented_method(
"Raiser",
"get500",
),
))
}
}
/// Processor for Raiser's methods.
#[derive(Clone, Debug)]
pub struct RaiserProcessor<P, H, R> {
service: H,
supa: ::fbthrift::NullServiceProcessor<P, R>,
_phantom: ::std::marker::PhantomData<(P, H, R)>,
}
impl<P, H, R> RaiserProcessor<P, H, R>
where
P: ::fbthrift::Protocol + ::std::marker::Send + ::std::marker::Sync + 'static,
P::Deserializer: ::std::marker::Send,
H: Raiser,
R: ::fbthrift::RequestContext<Name = ::const_cstr::ConstCStr> + ::std::marker::Sync,
<R as ::fbthrift::RequestContext>::ContextStack: ::fbthrift::ContextStack + ::std::marker::Send + ::std::marker::Sync,
{
pub fn new(service: H) -> Self {
Self {
service,
supa: ::fbthrift::NullServiceProcessor::new(),
_phantom: ::std::marker::PhantomData,
}
}
pub fn into_inner(self) -> H {
self.service
}
async fn handle_doBland<'a>(
&'a self,
p: &'a mut P::Deserializer,
req_ctxt: &R,
seqid: ::std::primitive::u32,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
use ::const_cstr::const_cstr;
use ::fbthrift::ProtocolReader as _;
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.doBland";
}
let mut ctx_stack = req_ctxt.get_context_stack(
&SERVICE_NAME,
&METHOD_NAME,
)?;
::fbthrift::ContextStack::pre_read(&mut ctx_stack)?;
static ARGS: &[::fbthrift::Field] = &[
];
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), ARGS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::fbthrift::ContextStack::post_read(&mut ctx_stack, 0)?;
let res = self.service.doBland(
).await;
let res = match res {
::std::result::Result::Ok(res) => {
crate::services::raiser::DoBlandExn::Success(res)
}
::std::result::Result::Err(crate::services::raiser::DoBlandExn::ApplicationException(aexn)) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&aexn), &format!("{:?}", aexn))?;
return ::std::result::Result::Err(aexn.into())
}
::std::result::Result::Err(crate::services::raiser::DoBlandExn::Success(_)) => {
panic!(
"{} attempted to return success via error",
"doBland",
)
}
};
::fbthrift::ContextStack::pre_write(&mut ctx_stack)?;
let res = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"doBland",
::fbthrift::MessageType::Reply,
seqid,
|p| ::fbthrift::Serialize::write(&res, p),
));
::fbthrift::ContextStack::post_write(&mut ctx_stack, 0)?;
::std::result::Result::Ok(res)
}
async fn handle_doRaise<'a>(
&'a self,
p: &'a mut P::Deserializer,
req_ctxt: &R,
seqid: ::std::primitive::u32,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
use ::const_cstr::const_cstr;
use ::fbthrift::ProtocolReader as _;
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.doRaise";
}
let mut ctx_stack = req_ctxt.get_context_stack(
&SERVICE_NAME,
&METHOD_NAME,
)?;
::fbthrift::ContextStack::pre_read(&mut ctx_stack)?;
static ARGS: &[::fbthrift::Field] = &[
];
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), ARGS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::fbthrift::ContextStack::post_read(&mut ctx_stack, 0)?;
let res = self.service.doRaise(
).await;
let res = match res {
::std::result::Result::Ok(res) => {
crate::services::raiser::DoRaiseExn::Success(res)
}
::std::result::Result::Err(crate::services::raiser::DoRaiseExn::ApplicationException(aexn)) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&aexn), &format!("{:?}", aexn))?;
return ::std::result::Result::Err(aexn.into())
}
::std::result::Result::Err(crate::services::raiser::DoRaiseExn::Success(_)) => {
panic!(
"{} attempted to return success via error",
"doRaise",
)
}
::std::result::Result::Err(exn) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&exn), &format!("{:?}", exn))?;
exn
}
};
::fbthrift::ContextStack::pre_write(&mut ctx_stack)?;
let res = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"doRaise",
::fbthrift::MessageType::Reply,
seqid,
|p| ::fbthrift::Serialize::write(&res, p),
));
::fbthrift::ContextStack::post_write(&mut ctx_stack, 0)?;
::std::result::Result::Ok(res)
}
async fn handle_get200<'a>(
&'a self,
p: &'a mut P::Deserializer,
req_ctxt: &R,
seqid: ::std::primitive::u32,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
use ::const_cstr::const_cstr;
use ::fbthrift::ProtocolReader as _;
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.get200";
}
let mut ctx_stack = req_ctxt.get_context_stack(
&SERVICE_NAME,
&METHOD_NAME,
)?;
::fbthrift::ContextStack::pre_read(&mut ctx_stack)?;
static ARGS: &[::fbthrift::Field] = &[
];
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), ARGS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::fbthrift::ContextStack::post_read(&mut ctx_stack, 0)?;
let res = self.service.get200(
).await;
let res = match res {
::std::result::Result::Ok(res) => {
crate::services::raiser::Get200Exn::Success(res)
}
::std::result::Result::Err(crate::services::raiser::Get200Exn::ApplicationException(aexn)) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&aexn), &format!("{:?}", aexn))?;
return ::std::result::Result::Err(aexn.into())
}
::std::result::Result::Err(crate::services::raiser::Get200Exn::Success(_)) => {
panic!(
"{} attempted to return success via error",
"get200",
)
}
};
::fbthrift::ContextStack::pre_write(&mut ctx_stack)?;
let res = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"get200",
::fbthrift::MessageType::Reply,
seqid,
|p| ::fbthrift::Serialize::write(&res, p),
));
::fbthrift::ContextStack::post_write(&mut ctx_stack, 0)?;
::std::result::Result::Ok(res)
}
async fn handle_get500<'a>(
&'a self,
p: &'a mut P::Deserializer,
req_ctxt: &R,
seqid: ::std::primitive::u32,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
use ::const_cstr::const_cstr;
use ::fbthrift::ProtocolReader as _;
const_cstr! {
SERVICE_NAME = "Raiser";
METHOD_NAME = "Raiser.get500";
}
let mut ctx_stack = req_ctxt.get_context_stack(
&SERVICE_NAME,
&METHOD_NAME,
)?;
::fbthrift::ContextStack::pre_read(&mut ctx_stack)?;
static ARGS: &[::fbthrift::Field] = &[
];
let _ = p.read_struct_begin(|_| ())?;
loop {
let (_, fty, fid) = p.read_field_begin(|_| (), ARGS)?;
match (fty, fid as ::std::primitive::i32) {
(::fbthrift::TType::Stop, _) => break,
(fty, _) => p.skip(fty)?,
}
p.read_field_end()?;
}
p.read_struct_end()?;
::fbthrift::ContextStack::post_read(&mut ctx_stack, 0)?;
let res = self.service.get500(
).await;
let res = match res {
::std::result::Result::Ok(res) => {
crate::services::raiser::Get500Exn::Success(res)
}
::std::result::Result::Err(crate::services::raiser::Get500Exn::ApplicationException(aexn)) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&aexn), &format!("{:?}", aexn))?;
return ::std::result::Result::Err(aexn.into())
}
::std::result::Result::Err(crate::services::raiser::Get500Exn::Success(_)) => {
panic!(
"{} attempted to return success via error",
"get500",
)
}
::std::result::Result::Err(exn) => {
req_ctxt.set_user_exception_header(::fbthrift::help::type_name_of_val(&exn), &format!("{:?}", exn))?;
exn
}
};
::fbthrift::ContextStack::pre_write(&mut ctx_stack)?;
let res = ::fbthrift::serialize!(P, |p| ::fbthrift::protocol::write_message(
p,
"get500",
::fbthrift::MessageType::Reply,
seqid,
|p| ::fbthrift::Serialize::write(&res, p),
));
::fbthrift::ContextStack::post_write(&mut ctx_stack, 0)?;
::std::result::Result::Ok(res)
}
}
#[::async_trait::async_trait]
impl<P, H, R> ::fbthrift::ServiceProcessor<P> for RaiserProcessor<P, H, R>
where
P: ::fbthrift::Protocol + ::std::marker::Send + ::std::marker::Sync + 'static,
P::Deserializer: ::std::marker::Send,
H: Raiser,
R: ::fbthrift::RequestContext<Name = ::const_cstr::ConstCStr> + ::std::marker::Send + ::std::marker::Sync + 'static,
<R as ::fbthrift::RequestContext>::ContextStack: ::fbthrift::ContextStack + ::std::marker::Send + ::std::marker::Sync + 'static
{
type RequestContext = R;
#[inline]
fn method_idx(&self, name: &[::std::primitive::u8]) -> ::std::result::Result<::std::primitive::usize, ::fbthrift::ApplicationException> {
match name {
b"doBland" => ::std::result::Result::Ok(0usize),
b"doRaise" => ::std::result::Result::Ok(1usize),
b"get200" => ::std::result::Result::Ok(2usize),
b"get500" => ::std::result::Result::Ok(3usize),
_ => ::std::result::Result::Err(::fbthrift::ApplicationException::unknown_method()),
}
}
async fn handle_method(
&self,
idx: ::std::primitive::usize,
_p: &mut P::Deserializer,
_r: &R,
_seqid: ::std::primitive::u32,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
match idx {
0usize => self.handle_doBland(_p, _r, _seqid).await,
1usize => self.handle_doRaise(_p, _r, _seqid).await,
2usize => self.handle_get200(_p, _r, _seqid).await,
3usize => self.handle_get500(_p, _r, _seqid).await,
bad => panic!(
"{}: unexpected method idx {}",
"RaiserProcessor",
bad
),
}
}
}
#[::async_trait::async_trait]
impl<P, H, R> ::fbthrift::ThriftService<P::Frame> for RaiserProcessor<P, H, R>
where
P: ::fbthrift::Protocol + ::std::marker::Send + ::std::marker::Sync + 'static,
P::Deserializer: ::std::marker::Send,
P::Frame: ::std::marker::Send + 'static,
H: Raiser,
R: ::fbthrift::RequestContext<Name = ::const_cstr::ConstCStr> + ::std::marker::Send + ::std::marker::Sync + 'static,
<R as ::fbthrift::RequestContext>::ContextStack: ::fbthrift::ContextStack + ::std::marker::Send + ::std::marker::Sync + 'static
{
type Handler = H;
type RequestContext = R;
async fn call(
&self,
req: ::fbthrift::ProtocolDecoded<P>,
req_ctxt: &R,
) -> ::anyhow::Result<::fbthrift::ProtocolEncodedFinal<P>> {
use ::fbthrift::{BufExt as _, ProtocolReader as _, ServiceProcessor as _};
let mut p = P::deserializer(req);
let (idx, mty, seqid) = p.read_message_begin(|name| self.method_idx(name))?;
if mty != ::fbthrift::MessageType::Call {
return ::std::result::Result::Err(::std::convert::From::from(::fbthrift::ApplicationException::new(
::fbthrift::ApplicationExceptionErrorCode::InvalidMessageType,
format!("message type {:?} not handled", mty)
)));
}
let idx = match idx {
::std::result::Result::Ok(idx) => idx,
::std::result::Result::Err(_) => {
let cur = P::into_buffer(p).reset();
return self.supa.call(cur, req_ctxt).await;
}
};
let res = self.handle_method(idx, &mut p, req_ctxt, seqid).await;
p.read_message_end()?;
match res {
::std::result::Result::Ok(bytes) => ::std::result::Result::Ok(bytes),
::std::result::Result::Err(err) => match err.downcast_ref::<::fbthrift::ProtocolError>() {
::std::option::Option::Some(::fbthrift::ProtocolError::ApplicationException(ae)) => {
let res = ::fbthrift::serialize!(P, |p| {
::fbthrift::protocol::write_message(
p,
"RaiserProcessor",
::fbthrift::MessageType::Exception,
seqid,
|p| ::fbthrift::Serialize::write(&ae, p),
)
});
::std::result::Result::Ok(res)
}
_ => ::std::result::Result::Err(err),
},
}
}
}
/// Construct a new instance of a Raiser service.
///
/// This is called when a new instance of a Thrift service Processor
/// is needed for a particular Thrift protocol.
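    ///
    /// A minimal sketch of instantiating a processor (the `MyRaiser` handler
    /// type is a placeholder; `F` and `R` are fixed by the surrounding server
    /// setup):
    ///
    /// ```
    /// # const _: &str = stringify! {
    /// let service = make_Raiser_server::<F, _, R>(
    ///     ::fbthrift::ProtocolID::BinaryProtocol,
    ///     MyRaiser::default(),
    /// )?;
    /// # };
    /// ```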
pub fn make_Raiser_server<F, H, R>(
proto: ::fbthrift::ProtocolID,
handler: H,
) -> ::std::result::Result<::std::boxed::Box<dyn ::fbthrift::ThriftService<F, Handler = H, RequestContext = R> + ::std::marker::Send + 'static>, ::fbthrift::ApplicationException>
where
F: ::fbthrift::Framing + ::std::marker::Send + ::std::marker::Sync + 'static,
H: Raiser,
R: ::fbthrift::RequestContext<Name = ::const_cstr::ConstCStr> + ::std::marker::Send + ::std::marker::Sync + 'static,
<R as ::fbthrift::RequestContext>::ContextStack: ::fbthrift::ContextStack + ::std::marker::Send + ::std::marker::Sync + 'static
{
match proto {
::fbthrift::ProtocolID::BinaryProtocol => {
::std::result::Result::Ok(::std::boxed::Box::new(RaiserProcessor::<::fbthrift::BinaryProtocol<F>, H, R>::new(handler)))
}
::fbthrift::ProtocolID::CompactProtocol => {
::std::result::Result::Ok(::std::boxed::Box::new(RaiserProcessor::<::fbthrift::CompactProtocol<F>, H, R>::new(handler)))
}
bad => ::std::result::Result::Err(::fbthrift::ApplicationException::invalid_protocol(bad)),
}
}
}
/// Client mocks. For every service, a struct mock::TheService that implements
/// client::TheService.
///
/// As an example of the generated API, for the following thrift service:
///
/// ```thrift
/// service MyService {
/// FunctionResponse myFunction(
/// 1: FunctionRequest request,
///     ) throws (
/// 1: StorageException s,
/// 2: NotFoundException n,
/// ),
///
/// // other functions
/// }
/// ```
///
/// we would end up with this mock object under crate::mock::MyService:
///
/// ```
/// # const _: &str = stringify! {
/// impl crate::client::MyService for MyService<'mock> {...}
///
/// pub struct MyService<'mock> {
/// pub myFunction: myFunction<'mock>,
/// // ...
/// }
///
/// impl dyn crate::client::MyService {
/// pub fn mock<'mock>() -> MyService<'mock>;
/// }
///
/// impl myFunction<'mock> {
/// // directly return the given success response
/// pub fn ret(&self, value: FunctionResponse);
///
/// // invoke closure to compute success response
/// pub fn mock(
/// &self,
/// mock: impl FnMut(FunctionRequest) -> FunctionResponse + Send + Sync + 'mock,
/// );
///
/// // invoke closure to compute response
/// pub fn mock_result(
/// &self,
/// mock: impl FnMut(FunctionRequest) -> Result<FunctionResponse, crate::services::MyService::MyFunctionExn> + Send + Sync + 'mock,
/// );
///
/// // return one of the function's declared exceptions
/// pub fn throw<E>(&self, exception: E)
/// where
/// E: Clone + Into<crate::services::MyService::MyFunctionExn> + Send + Sync + 'mock;
/// }
///
/// impl From<StorageException> for MyFunctionExn {...}
/// impl From<NotFoundException> for MyFunctionExn {...}
/// # };
/// ```
///
/// The intended usage from a test would be:
///
/// ```
/// # const _: &str = stringify! {
/// use std::sync::Arc;
/// use thrift_if::client::MyService;
///
/// #[test]
/// fn test_my_client() {
/// let mock = Arc::new(<dyn MyService>::mock());
///
/// // directly return a success response
/// let resp = FunctionResponse {...};
/// mock.myFunction.ret(resp);
///
/// // or give a closure to compute the success response
/// mock.myFunction.mock(|request| FunctionResponse {...});
///
/// // or throw one of the function's exceptions
/// mock.myFunction.throw(StorageException::ItFailed);
///
/// // or compute a Result (useful if your exceptions aren't Clone)
/// mock.myFunction.mock_result(|request| Err(...));
///
/// let out = do_the_thing(mock).wait().unwrap();
/// assert!(out.what_i_expected());
/// }
///
/// fn do_the_thing(
/// client: Arc<dyn MyService + Send + Sync + 'static>,
/// ) -> impl Future<Item = Out> {...}
/// # };
/// ```
pub mod mock {
pub struct Raiser<'mock> {
pub doBland: r#impl::raiser::doBland<'mock>,
pub doRaise: r#impl::raiser::doRaise<'mock>,
pub get200: r#impl::raiser::get200<'mock>,
pub get500: r#impl::raiser::get500<'mock>,
_marker: ::std::marker::PhantomData<&'mock ()>,
}
impl dyn super::client::Raiser {
pub fn mock<'mock>() -> Raiser<'mock> {
Raiser {
doBland: r#impl::raiser::doBland::unimplemented(),
doRaise: r#impl::raiser::doRaise::unimplemented(),
get200: r#impl::raiser::get200::unimplemented(),
get500: r#impl::raiser::get500::unimplemented(),
_marker: ::std::marker::PhantomData,
}
}
}
#[::async_trait::async_trait]
impl<'mock> super::client::Raiser for Raiser<'mock> {
fn doBland(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoBlandError>> + ::std::marker::Send + 'static>> {
let mut closure = self.doBland.closure.lock().unwrap();
let closure: &mut dyn ::std::ops::FnMut() -> _ = &mut **closure;
::std::boxed::Box::pin(::futures::future::ready(closure()))
}
fn doRaise(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<(), crate::errors::raiser::DoRaiseError>> + ::std::marker::Send + 'static>> {
let mut closure = self.doRaise.closure.lock().unwrap();
let closure: &mut dyn ::std::ops::FnMut() -> _ = &mut **closure;
::std::boxed::Box::pin(::futures::future::ready(closure()))
}
fn get200(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get200Error>> + ::std::marker::Send + 'static>> {
let mut closure = self.get200.closure.lock().unwrap();
let closure: &mut dyn ::std::ops::FnMut() -> _ = &mut **closure;
::std::boxed::Box::pin(::futures::future::ready(closure()))
}
fn get500(
&self,
) -> ::std::pin::Pin<::std::boxed::Box<dyn ::std::future::Future<Output = ::std::result::Result<::std::string::String, crate::errors::raiser::Get500Error>> + ::std::marker::Send + 'static>> {
let mut closure = self.get500.closure.lock().unwrap();
let closure: &mut dyn ::std::ops::FnMut() -> _ = &mut **closure;
::std::boxed::Box::pin(::futures::future::ready(closure()))
}
}
mod r#impl {
pub mod raiser {
pub struct doBland<'mock> {
pub(crate) closure: ::std::sync::Mutex<::std::boxed::Box<
dyn ::std::ops::FnMut() -> ::std::result::Result<
(),
crate::errors::raiser::DoBlandError,
> + ::std::marker::Send + ::std::marker::Sync + 'mock,
>>,
}
impl<'mock> doBland<'mock> {
pub fn unimplemented() -> Self {
doBland {
closure: ::std::sync::Mutex::new(::std::boxed::Box::new(|| panic!(
"{}::{} is not mocked",
"Raiser",
"doBland",
))),
}
}
pub fn ret(&self, value: ()) {
self.mock(move || value.clone());
}
pub fn mock(&self, mut mock: impl ::std::ops::FnMut() -> () + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Ok(mock()));
}
pub fn mock_result(&self, mut mock: impl ::std::ops::FnMut() -> ::std::result::Result<(), crate::errors::raiser::DoBlandError> + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || mock());
}
pub fn throw<E>(&self, exception: E)
where
E: ::std::convert::Into<crate::errors::raiser::DoBlandError>,
E: ::std::clone::Clone + ::std::marker::Send + ::std::marker::Sync + 'mock,
{
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Err(exception.clone().into()));
}
}
pub struct doRaise<'mock> {
pub(crate) closure: ::std::sync::Mutex<::std::boxed::Box<
dyn ::std::ops::FnMut() -> ::std::result::Result<
(),
crate::errors::raiser::DoRaiseError,
> + ::std::marker::Send + ::std::marker::Sync + 'mock,
>>,
}
impl<'mock> doRaise<'mock> {
pub fn unimplemented() -> Self {
doRaise {
closure: ::std::sync::Mutex::new(::std::boxed::Box::new(|| panic!(
"{}::{} is not mocked",
"Raiser",
"doRaise",
))),
}
}
pub fn ret(&self, value: ()) {
self.mock(move || value.clone());
}
pub fn mock(&self, mut mock: impl ::std::ops::FnMut() -> () + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Ok(mock()));
}
pub fn mock_result(&self, mut mock: impl ::std::ops::FnMut() -> ::std::result::Result<(), crate::errors::raiser::DoRaiseError> + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || mock());
}
pub fn throw<E>(&self, exception: E)
where
E: ::std::convert::Into<crate::errors::raiser::DoRaiseError>,
E: ::std::clone::Clone + ::std::marker::Send + ::std::marker::Sync + 'mock,
{
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Err(exception.clone().into()));
}
}
pub struct get200<'mock> {
pub(crate) closure: ::std::sync::Mutex<::std::boxed::Box<
dyn ::std::ops::FnMut() -> ::std::result::Result<
::std::string::String,
crate::errors::raiser::Get200Error,
> + ::std::marker::Send + ::std::marker::Sync + 'mock,
>>,
}
impl<'mock> get200<'mock> {
pub fn unimplemented() -> Self {
get200 {
closure: ::std::sync::Mutex::new(::std::boxed::Box::new(|| panic!(
"{}::{} is not mocked",
"Raiser",
"get200",
))),
}
}
pub fn ret(&self, value: ::std::string::String) {
self.mock(move || value.clone());
}
pub fn mock(&self, mut mock: impl ::std::ops::FnMut() -> ::std::string::String + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Ok(mock()));
}
pub fn mock_result(&self, mut mock: impl ::std::ops::FnMut() -> ::std::result::Result<::std::string::String, crate::errors::raiser::Get200Error> + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || mock());
}
pub fn throw<E>(&self, exception: E)
where
E: ::std::convert::Into<crate::errors::raiser::Get200Error>,
E: ::std::clone::Clone + ::std::marker::Send + ::std::marker::Sync + 'mock,
{
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Err(exception.clone().into()));
}
}
pub struct get500<'mock> {
pub(crate) closure: ::std::sync::Mutex<::std::boxed::Box<
dyn ::std::ops::FnMut() -> ::std::result::Result<
::std::string::String,
crate::errors::raiser::Get500Error,
> + ::std::marker::Send + ::std::marker::Sync + 'mock,
>>,
}
impl<'mock> get500<'mock> {
pub fn unimplemented() -> Self {
get500 {
closure: ::std::sync::Mutex::new(::std::boxed::Box::new(|| panic!(
"{}::{} is not mocked",
"Raiser",
"get500",
))),
}
}
pub fn ret(&self, value: ::std::string::String) {
self.mock(move || value.clone());
}
pub fn mock(&self, mut mock: impl ::std::ops::FnMut() -> ::std::string::String + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Ok(mock()));
}
pub fn mock_result(&self, mut mock: impl ::std::ops::FnMut() -> ::std::result::Result<::std::string::String, crate::errors::raiser::Get500Error> + ::std::marker::Send + ::std::marker::Sync + 'mock) {
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || mock());
}
pub fn throw<E>(&self, exception: E)
where
E: ::std::convert::Into<crate::errors::raiser::Get500Error>,
E: ::std::clone::Clone + ::std::marker::Send + ::std::marker::Sync + 'mock,
{
let mut closure = self.closure.lock().unwrap();
*closure = ::std::boxed::Box::new(move || ::std::result::Result::Err(exception.clone().into()));
}
}
}
}
}
/// Error return types.
pub mod errors {
/// Errors for Raiser functions.
pub mod raiser {
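        /// Helper for recovering a `Fiery` exception from an `::anyhow::Error`
        /// chain. A hypothetical sketch (`err` is assumed to be an
        /// `::anyhow::Error` propagated from a Raiser call):
        ///
        /// ```
        /// # const _: &str = stringify! {
        /// use crate::errors::raiser::AsFiery;
        ///
        /// if let Some(fiery) = err.as_fiery() {
        ///     eprintln!("server raised Fiery: {:?}", fiery);
        /// }
        /// # };
        /// ```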
pub trait AsFiery {
fn as_fiery(&self) -> Option<&crate::types::Fiery>;
}
impl AsFiery for ::anyhow::Error {
fn as_fiery(&self) -> Option<&crate::types::Fiery> {
for cause in self.chain() {
if let Some(DoRaiseError::f(e)) = cause.downcast_ref::<DoRaiseError>() {
return Some(e);
}
if let Some(Get500Error::f(e)) = cause.downcast_ref::<Get500Error>() {
return Some(e);
}
}
None
}
}
pub trait AsSerious {
fn as_serious(&self) -> Option<&crate::types::Serious>;
}
impl AsSerious for ::anyhow::Error {
fn as_serious(&self) -> Option<&crate::types::Serious> {
for cause in self.chain() {
if let Some(DoRaiseError::s(e)) = cause.downcast_ref::<DoRaiseError>() {
return Some(e);
}
if let Some(Get500Error::s(e)) = cause.downcast_ref::<Get500Error>() {
return Some(e);
}
}
None
}
}
pub trait AsBanal {
fn as_banal(&self) -> Option<&crate::types::Banal>;
}
impl AsBanal for ::anyhow::Error {
fn as_banal(&self) -> Option<&crate::types::Banal> {
for cause in self.chain() {
if let Some(DoRaiseError::b(e)) = cause.downcast_ref::<DoRaiseError>() {
return Some(e);
}
if let Some(Get500Error::b(e)) = cause.downcast_ref::<Get500Error>() {
return Some(e);
}
}
None
}
}
pub type DoBlandError = ::fbthrift::NonthrowingFunctionError;
/// Errors for doRaise.
#[derive(Debug, ::thiserror::Error)]
pub enum DoRaiseError {
#[error("Raiser::doRaise failed with {0:?}")]
b(crate::types::Banal),
#[error("Raiser::doRaise failed with {0:?}")]
f(crate::types::Fiery),
#[error("Raiser::doRaise failed with {0:?}")]
s(crate::types::Serious),
#[error("Application exception: {0:?}")]
ApplicationException(::fbthrift::types::ApplicationException),
#[error("{0}")]
ThriftError(::anyhow::Error),
}
impl ::std::convert::From<crate::types::Banal> for DoRaiseError {
fn from(e: crate::types::Banal) -> Self {
DoRaiseError::b(e)
}
}
impl AsBanal for DoRaiseError {
fn as_banal(&self) -> Option<&crate::types::Banal> {
match self {
DoRaiseError::b(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<crate::types::Fiery> for DoRaiseError {
fn from(e: crate::types::Fiery) -> Self {
DoRaiseError::f(e)
}
}
impl AsFiery for DoRaiseError {
fn as_fiery(&self) -> Option<&crate::types::Fiery> {
match self {
DoRaiseError::f(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<crate::types::Serious> for DoRaiseError {
fn from(e: crate::types::Serious) -> Self {
DoRaiseError::s(e)
}
}
impl AsSerious for DoRaiseError {
fn as_serious(&self) -> Option<&crate::types::Serious> {
match self {
DoRaiseError::s(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<::anyhow::Error> for DoRaiseError {
fn from(err: ::anyhow::Error) -> Self {
DoRaiseError::ThriftError(err)
}
}
impl ::std::convert::From<::fbthrift::ApplicationException> for DoRaiseError {
fn from(ae: ::fbthrift::ApplicationException) -> Self {
DoRaiseError::ApplicationException(ae)
}
}
pub type Get200Error = ::fbthrift::NonthrowingFunctionError;
/// Errors for get500.
#[derive(Debug, ::thiserror::Error)]
pub enum Get500Error {
#[error("Raiser::get500 failed with {0:?}")]
f(crate::types::Fiery),
#[error("Raiser::get500 failed with {0:?}")]
b(crate::types::Banal),
#[error("Raiser::get500 failed with {0:?}")]
s(crate::types::Serious),
#[error("Application exception: {0:?}")]
ApplicationException(::fbthrift::types::ApplicationException),
#[error("{0}")]
ThriftError(::anyhow::Error),
}
impl ::std::convert::From<crate::types::Fiery> for Get500Error {
fn from(e: crate::types::Fiery) -> Self {
Get500Error::f(e)
}
}
impl AsFiery for Get500Error {
fn as_fiery(&self) -> Option<&crate::types::Fiery> {
match self {
Get500Error::f(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<crate::types::Banal> for Get500Error {
fn from(e: crate::types::Banal) -> Self {
Get500Error::b(e)
}
}
impl AsBanal for Get500Error {
fn as_banal(&self) -> Option<&crate::types::Banal> {
match self {
Get500Error::b(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<crate::types::Serious> for Get500Error {
fn from(e: crate::types::Serious) -> Self {
Get500Error::s(e)
}
}
impl AsSerious for Get500Error {
fn as_serious(&self) -> Option<&crate::types::Serious> {
match self {
Get500Error::s(inner) => Some(inner),
_ => None,
}
}
}
impl ::std::convert::From<::anyhow::Error> for Get500Error {
fn from(err: ::anyhow::Error) -> Self {
Get500Error::ThriftError(err)
}
}
impl ::std::convert::From<::fbthrift::ApplicationException> for Get500Error {
fn from(ae: ::fbthrift::ApplicationException) -> Self {
Get500Error::ApplicationException(ae)
}
}
}
}
| 42.99879 | 215 | 0.432899 |
79ac17862638ad0e1634aa02a66e0008a4c2ce53 | 3,388 | use state::*;
use result::ValidationResult;
pub trait Validator<M> {
fn validate(&mut self, model: &M) -> ValidationResult<bool>;
}
pub trait Rule<T, S>
{
fn validate(&self, input:&T, state: &mut S) -> ValidationResult<()>;
}
impl <T, S, F> Rule<T, S> for F
where F: Fn(&T, &mut S) -> ValidationResult<()>
{
fn validate(&self, input:&T, state: &mut S) -> ValidationResult<()> {
(*self)(input, state)
}
}
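// Illustrative note (not part of the original API surface): thanks to the
// blanket impl above, any closure with a matching signature can be boxed up
// as a rule, e.g. (with `MyModel` as a placeholder type):
//
//     let rule: Box<dyn Rule<MyModel, ValidationState>> =
//         Box::new(|_m: &MyModel, _s: &mut ValidationState| Ok(()));
//
// The tests at the bottom of this file use exactly this pattern via
// `ValidationSchema::rule`.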
pub struct ValidationSchema<M> {
pub state: ValidationState,
    pub rules: Vec<Box<dyn Rule<M, ValidationState>>>
}
impl <M> ValidationSchema<M> {
pub fn new() -> Self {
ValidationSchema {
state: ValidationState::new(),
rules: vec![],
}
}
    pub fn rule(&mut self, r: Box<dyn Rule<M, ValidationState>>)
{
self.rules.push(r)
}
}
impl <M> Validator<M> for ValidationSchema<M> {
fn validate(&mut self, model: &M) -> ValidationResult<bool> {
for rule in self.rules.iter() {
if let Err(err) = rule.validate(model, &mut self.state) {
self.state.valid = false;
self.state.errors.push(err);
}
}
Ok(self.state.valid)
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::result::*;
use super::super::state::*;
#[allow(dead_code)]
struct TestStruct {
num: i32,
text: String,
}
impl TestStruct {
pub fn new<T>(num: i32, text: T) -> TestStruct where T: Into<String> {
TestStruct {
num: num,
text: text.into(),
}
}
}
#[test]
pub fn test_null_rule() {
let mut v = ValidationSchema::<TestStruct>::new();
v.rule(Box::new(|_m: &TestStruct, _vs: &mut ValidationState| {
Ok(())
}));
let a = TestStruct::new(123, "hello");
assert_eq!(v.validate(&a).unwrap_or(false), true);
}
#[test]
pub fn test_accept_rule() {
let mut v = ValidationSchema::<TestStruct>::new();
v.rule(Box::new(|_m: &TestStruct, vs: &mut ValidationState| {
vs.accept("field name");
Ok(())
}));
let a = TestStruct::new(123, "hello");
assert_eq!(v.validate(&a).unwrap_or(false), true);
}
#[test]
pub fn test_reject_rule() {
let mut v = ValidationSchema::<TestStruct>::new();
v.rule(Box::new(|_m: &TestStruct, vs: &mut ValidationState| {
vs.reject("field name", ValidationError::InvalidValue("test error".to_owned()));
Ok(())
}));
let a = TestStruct::new(123, "hello");
assert_eq!(v.validate(&a).unwrap_or(true), false);
}
#[test]
pub fn test_err_rule() {
let mut v = ValidationSchema::<TestStruct>::new();
v.rule(Box::new(|_m: &TestStruct, _vs: &mut ValidationState| -> ValidationResult<()> {
Err(ValidationError::ApplicationError("test error".to_owned()))
}));
let a = TestStruct::new(123, "hello");
assert_eq!(v.validate(&a).unwrap_or(true), false);
assert_eq!(v.state.errors.len(), 1);
assert_eq!(format!("{}", v.state.errors.get(0).unwrap()), "Application error: test error");
}
} | 26.263566 | 99 | 0.521251 |
769a9b723e17222f67f774732d929d087bbebd6d | 462 | use crate::err::ProcessingResult;
use crate::proc::MatchAction::*;
use crate::proc::MatchMode::*;
use crate::proc::Processor;
pub fn process_comment(proc: &mut Processor) -> ProcessingResult<()> {
proc.m(IsSeq(b"<!--"), Discard).expect();
loop {
// Use fast memchr.
let possible = proc.m(ThroughChar(b'>'), Discard).require("comment end")?;
if proc[possible].ends_with(b"-->") {
break;
};
};
Ok(())
}
| 27.176471 | 82 | 0.580087 |
26aab751ba208e7f8510aac0146b535375fe6659 | 2,523 | //! JWT layer
use crate::{errors::AppErrorMessage, layers, models::auth};
use axum::{
body::{Body, Full},
http::{HeaderValue, Request, StatusCode},
response::Response,
};
use bytes::Bytes;
use futures::future::BoxFuture;
use std::task::{Context, Poll};
use tower::{Layer, Service};
pub struct JwtLayer;
impl<S> Layer<S> for JwtLayer {
type Service = JwtMiddleware<S>;
fn layer(&self, inner: S) -> Self::Service {
JwtMiddleware { inner }
}
}
#[derive(Clone)]
pub struct JwtMiddleware<S> {
inner: S,
}
impl<S> Service<Request<Body>> for JwtMiddleware<S>
where
S: Service<Request<Body>, Response = Response> + Send + 'static,
S::Future: Send + 'static,
{
type Response = S::Response;
type Error = S::Error;
// `BoxFuture` is a type alias for `Pin<Box<dyn Future + Send + 'a>>`
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.inner.poll_ready(cx)
}
fn call(&mut self, request: Request<Body>) -> Self::Future {
let is_authorized = match request.extensions().get::<layers::SharedState>() {
Some(state) => {
let state = state.clone();
auth::Claims::extract_from_request(request.headers(), state.jwt_secret_key.clone()).is_some()
}
_ => false,
};
let future = self.inner.call(request);
Box::pin(async move {
let mut response = Response::default();
response = match is_authorized {
true => future.await?,
false => {
let (mut parts, _body) = response.into_parts();
// Status
parts.status = StatusCode::UNAUTHORIZED;
// Content Type
parts.headers.insert(
axum::http::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
// Body
let msg = serde_json::json!(AppErrorMessage {
code: StatusCode::UNAUTHORIZED.as_u16(),
message: String::from("Unauthorized"),
});
let msg = Bytes::from(msg.to_string());
Response::from_parts(parts, axum::body::boxed(Full::from(msg)))
}
};
Ok(response)
})
}
}
| 29.682353 | 109 | 0.529132 |
c12a4e91e065abbe004008f0246a09bcde5b5d74 | 1,576 | use actix_web::{get, post, web, App, HttpResponse, HttpServer, Responder };
use diesel::prelude::*;
use diesel::pg::PgConnection;
use diesel::r2d2::{self, ConnectionManager};
use dotenv::dotenv;
use std::env;
#[macro_use]
extern crate diesel;
pub mod routes;
pub mod models;
pub mod schema;
// use self::models::*;
type DbPool = r2d2::Pool<ConnectionManager<PgConnection>>;
pub fn create_db_pool() -> DbPool {
dotenv().ok();
let database_url = env::var("DATABASE_URL")
.expect("DATABASE_URL must be set");
let manager = ConnectionManager::<PgConnection>::new(database_url);
let pool = r2d2::Pool::builder()
.build(manager)
.expect("Failed to create pool.");
pool
// PgConnection::establish(&database_url)
// .expect(&format!("Error connecting to {}", database_url))
}
#[get("/")]
async fn hello(_pool: web::Data<DbPool>) -> impl Responder {
HttpResponse::Ok().body("Hello world")
}
#[post("/echo")]
async fn echo(req_body: String) -> impl Responder {
HttpResponse::Ok().body(req_body)
}
struct AppState {
pub connection: PgConnection
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
// std::env::set_var("RUST_LOG", "actix_web=info");
let pool = create_db_pool();
HttpServer::new(move || {
App::new()
// .data(AppState {
// connection: connection
// })
.data(pool.clone())
.service(hello)
.service(routes::user::find_all)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
| 22.514286 | 75 | 0.609772 |
01855924eb7229a75b5a30b0aa525ff61eb2bb63 | 48,322 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// The main job of `VsockConnection` is to forward data traffic, back and forth, between a
/// guest-side AF_VSOCK socket and a host-side generic `Read + Write + AsRawFd` stream, while
/// also managing its internal state.
/// To that end, `VsockConnection` implements:
/// - `VsockChannel` for:
/// - moving data from the host stream to a guest-provided RX buffer, via `recv_pkt()`; and
/// - moving data from a guest-provided TX buffer to the host stream, via `send_pkt()`; and
/// - updating its internal state, by absorbing control packets (anything other than
/// VSOCK_OP_RW).
/// - `VsockEpollListener` for getting notified about the availability of data or free buffer
/// space at the host stream.
///
/// Note: there is a certain asymmetry to the RX and TX data flows:
/// - RX transfers do not need any data buffering, since data is read straight from the
/// host stream and into the guest-provided RX buffer;
/// - TX transfers may require some data to be buffered by `VsockConnection`, if the host
/// peer can't keep up with reading the data that we're writing. This is because, once
/// the guest driver provides some data in a virtio TX buffer, the vsock device must
/// consume it. If that data can't be forwarded straight to the host stream, we'll
/// have to store it in a buffer (and flush it at a later time). Vsock flow control
/// ensures that our TX buffer doesn't overflow.
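///
/// As an illustrative sketch only (hypothetical caller-side usage; the muxer
/// that actually drives these calls lives outside this file), the channel is
/// exercised roughly like:
///
/// ```ignore
/// conn.send_pkt(&tx_pkt)?;          // guest TX buffer -> host stream
/// if conn.has_pending_rx() {
///     conn.recv_pkt(&mut rx_pkt)?;  // host stream -> guest RX buffer
/// }
/// ```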
// The code in this file is best read with a fresh memory of the vsock protocol inner-workings.
// To help with that, here is a
//
// Short primer on the vsock protocol
// ----------------------------------
//
// 1. Establishing a connection
// A vsock connection is considered established after a two-way handshake:
// - the initiating peer sends a connection request packet (`hdr.op` == VSOCK_OP_REQUEST);
// then
// - the listening peer sends back a connection response packet (`hdr.op` ==
// VSOCK_OP_RESPONSE).
//
// 2. Terminating a connection
// When a peer wants to shut down an established connection, it sends a VSOCK_OP_SHUTDOWN
// packet. Two header flags are used with VSOCK_OP_SHUTDOWN, indicating the sender's
// intention:
// - VSOCK_FLAGS_SHUTDOWN_RCV: the sender will receive no more data for this connection; and
// - VSOCK_FLAGS_SHUTDOWN_SEND: the sender will send no more data for this connection.
// After a shutdown packet, the receiving peer will have some protocol-undefined time to
// flush its buffers, and then forcefully terminate the connection by sending back an RST
// packet. If the shutdown-initiating peer doesn't receive this RST packet during a timeout
// period, it will send one itself, thus terminating the connection.
//    Note: a peer can send more than one VSOCK_OP_SHUTDOWN packet. However, read/write
// indications cannot be undone. E.g. once a "no-more-sending" promise was made, it
// cannot be taken back. That is, `hdr.flags` will be ORed between subsequent
// VSOCK_OP_SHUTDOWN packets.
//
// 3. Flow control
// Before sending a data packet (VSOCK_OP_RW), the sender must make sure that the receiver
// has enough free buffer space to store that data. If this condition is not respected, the
// receiving peer's behaviour is undefined. In this implementation, we forcefully terminate
// the connection by sending back a VSOCK_OP_RST packet.
// Note: all buffer space information is computed and stored on a per-connection basis.
// Peers keep each other informed about the free buffer space they have by filling in two
// packet header members with each packet they send:
// - `hdr.buf_alloc`: the total buffer space the peer has allocated for receiving data; and
// - `hdr.fwd_cnt`: the total number of bytes the peer has successfully flushed out of its
// buffer.
// One can figure out how much space its peer has available in its buffer by inspecting the
// difference between how much it has sent to the peer and how much the peer has flushed out
// (i.e. "forwarded", in the vsock spec terminology):
// `peer_free = peer_buf_alloc - (total_bytes_sent_to_peer - peer_fwd_cnt)`.
// Note: the above requires that peers constantly keep each other informed on their buffer
// space situation. However, since there are no receipt acknowledgement packets
// defined for the vsock protocol, packet flow can often be unidirectional (just one
// peer sending data to another), so the sender's information about the receiver's
// buffer space can get quickly outdated. The vsock protocol defines two solutions to
// this problem:
// 1. The sender can explicitly ask for a buffer space (i.e. "credit") update from its
// peer, via a VSOCK_OP_CREDIT_REQUEST packet, to which it will get a
// VSOCK_OP_CREDIT_UPDATE response (or any response will do, really, since credit
// information must be included in any packet);
// 2. The receiver can be proactive, and send VSOCK_OP_CREDIT_UPDATE packet, whenever
// it thinks its peer's information is out of date.
// Our implementation uses the proactive approach.
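//
// As an illustrative sketch only (a hypothetical free-standing helper, not
// part of this module), the peer-free-space formula above can be written with
// wrapping arithmetic as:
//
//     fn peer_free(peer_buf_alloc: u32, bytes_sent: Wrapping<u32>, peer_fwd_cnt: Wrapping<u32>) -> usize {
//         (Wrapping(peer_buf_alloc) - (bytes_sent - peer_fwd_cnt)).0 as usize
//     }
//
// `VsockConnection::peer_avail_credit()` below implements this computation,
// with `rx_cnt` playing the role of `bytes_sent`.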
use std::io::{ErrorKind, Read, Write};
use std::num::Wrapping;
use std::os::unix::io::{AsRawFd, RawFd};
use std::time::{Duration, Instant};
use utils::epoll::EventSet;
use super::super::defs::uapi;
use super::super::packet::VsockPacket;
use super::super::{Result as VsockResult, VsockChannel, VsockEpollListener, VsockError};
use super::defs;
use super::txbuf::TxBuf;
use super::{ConnState, Error, PendingRx, PendingRxSet, Result};
/// A self-managing connection object, that handles communication between a guest-side AF_VSOCK
/// socket and a host-side `Read + Write + AsRawFd` stream.
pub struct VsockConnection<S: Read + Write + AsRawFd> {
/// The current connection state.
state: ConnState,
/// The local CID. Most of the time this will be the constant `2` (the vsock host CID).
local_cid: u64,
/// The peer (guest) CID.
peer_cid: u64,
/// The local (host) port.
local_port: u32,
/// The peer (guest) port.
peer_port: u32,
/// The (connected) host-side stream.
stream: S,
/// The TX buffer for this connection.
tx_buf: TxBuf,
/// Total number of bytes that have been successfully written to `self.stream`, either
/// directly, or flushed from `self.tx_buf`.
fwd_cnt: Wrapping<u32>,
/// The amount of buffer space that the peer (guest) has allocated for this connection.
peer_buf_alloc: u32,
/// The total number of bytes that the peer has forwarded away.
peer_fwd_cnt: Wrapping<u32>,
/// The total number of bytes sent to the peer (guest vsock driver)
rx_cnt: Wrapping<u32>,
/// Our `self.fwd_cnt`, as last sent to the peer. This is used to provide proactive credit
/// updates, and let the peer know it's OK to send more data.
last_fwd_cnt_to_peer: Wrapping<u32>,
/// The set of pending RX packet indications that `recv_pkt()` will use to fill in a
/// packet for the peer (guest).
pending_rx: PendingRxSet,
/// Instant when this connection should be scheduled for immediate termination, due to some
/// timeout condition having been fulfilled.
expiry: Option<Instant>,
}
impl<S> VsockChannel for VsockConnection<S>
where
S: Read + Write + AsRawFd,
{
/// Fill in a vsock packet, to be delivered to our peer (the guest driver).
///
/// As per the `VsockChannel` trait, this should only be called when there is data to be
/// fetched from the channel (i.e. `has_pending_rx()` is true). Otherwise, it will error
/// out with `VsockError::NoData`.
/// Pending RX indications are set by other mutable actions performed on the channel. For
/// instance, `send_pkt()` could set an Rst indication, if called with a VSOCK_OP_SHUTDOWN
/// packet, or `notify()` could set a Rw indication (a data packet can be fetched from the
/// channel), if data was ready to be read from the host stream.
///
/// Returns:
/// - `Ok(())`: the packet has been successfully filled in and is ready for delivery;
/// - `Err(VsockError::NoData)`: there was no data available with which to fill in the
/// packet;
/// - `Err(VsockError::PktBufMissing)`: the packet would've been filled in with data, but
/// it is missing the data buffer.
fn recv_pkt(&mut self, pkt: &mut VsockPacket) -> VsockResult<()> {
// Perform some generic initialization that is the same for any packet operation (e.g.
// source, destination, credit, etc).
self.init_pkt(pkt);
// If forceful termination is pending, there's no point in checking for anything else.
// It's dead, Jim.
if self.pending_rx.remove(PendingRx::Rst) {
pkt.set_op(uapi::VSOCK_OP_RST);
return Ok(());
}
// Next up: if we're due a connection confirmation, that's all we need to know to fill
// in this packet.
if self.pending_rx.remove(PendingRx::Response) {
self.state = ConnState::Established;
pkt.set_op(uapi::VSOCK_OP_RESPONSE);
return Ok(());
}
// Same thing goes for locally-initiated connections that need to yield a connection
// request.
if self.pending_rx.remove(PendingRx::Request) {
self.expiry =
Some(Instant::now() + Duration::from_millis(defs::CONN_REQUEST_TIMEOUT_MS));
pkt.set_op(uapi::VSOCK_OP_REQUEST);
return Ok(());
}
// A credit update is basically a no-op, so we should only waste a perfectly fine RX
// buffer on it if we really have nothing else to say.
if self.pending_rx.remove(PendingRx::CreditUpdate) && !self.has_pending_rx() {
pkt.set_op(uapi::VSOCK_OP_CREDIT_UPDATE);
self.last_fwd_cnt_to_peer = self.fwd_cnt;
return Ok(());
}
// Alright, if we got to here, we need to cough up a data packet. We've already checked
// for all other pending RX indications.
if !self.pending_rx.remove(PendingRx::Rw) {
return Err(VsockError::NoData);
}
match self.state {
// A data packet is only valid for established connections, and connections for
// which our peer has initiated a graceful shutdown, but can still receive data.
ConnState::Established | ConnState::PeerClosed(false, _) => (),
_ => {
// Any other connection state is invalid at this point, and we need to kill it
// with fire.
pkt.set_op(uapi::VSOCK_OP_RST);
return Ok(());
}
}
// Oh wait, before we start bringing in the big data, can our peer handle receiving so
// much bytey goodness?
if self.need_credit_update_from_peer() {
self.last_fwd_cnt_to_peer = self.fwd_cnt;
pkt.set_op(uapi::VSOCK_OP_CREDIT_REQUEST);
return Ok(());
}
let buf = pkt.buf_mut().ok_or(VsockError::PktBufMissing)?;
// The maximum amount of data we can read in is limited by both the RX buffer size and
// the peer available buffer space.
let max_len = std::cmp::min(buf.len(), self.peer_avail_credit());
// Read data from the stream straight to the RX buffer, for maximum throughput.
match self.stream.read(&mut buf[..max_len]) {
Ok(read_cnt) => {
if read_cnt == 0 {
// A 0-length read means the host stream was closed down. In that case,
// we'll ask our peer to shut down the connection. We can neither send nor
// receive any more data.
self.state = ConnState::LocalClosed;
self.expiry = Some(
Instant::now() + Duration::from_millis(defs::CONN_SHUTDOWN_TIMEOUT_MS),
);
pkt.set_op(uapi::VSOCK_OP_SHUTDOWN)
.set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_RCV)
.set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_SEND);
} else {
// On a successful data read, we fill in the packet with the RW op, and
// length of the read data.
pkt.set_op(uapi::VSOCK_OP_RW).set_len(read_cnt as u32);
}
}
Err(err) => {
// We are not expecting any errors when reading from the underlying stream. If
// any show up, we'll immediately kill this connection.
error!(
"vsock: error reading from backing stream: lp={}, pp={}, err={:?}",
self.local_port, self.peer_port, err
);
pkt.set_op(uapi::VSOCK_OP_RST);
}
};
self.rx_cnt += Wrapping(pkt.len());
self.last_fwd_cnt_to_peer = self.fwd_cnt;
Ok(())
}
/// Deliver a guest-generated packet to this connection.
///
/// This forwards the data in RW packets to the host stream, and absorbs control packets,
/// using them to manage the internal connection state.
///
/// Returns:
/// always `Ok(())`: the packet has been consumed;
fn send_pkt(&mut self, pkt: &VsockPacket) -> VsockResult<()> {
// Update the peer credit information.
self.peer_buf_alloc = pkt.buf_alloc();
self.peer_fwd_cnt = Wrapping(pkt.fwd_cnt());
match self.state {
// Most frequent case: this is an established connection that needs to forward some
// data to the host stream. Also works for a connection that has begun shutting
// down, but the peer still has some data to send.
ConnState::Established | ConnState::PeerClosed(_, false)
if pkt.op() == uapi::VSOCK_OP_RW =>
{
if pkt.buf().is_none() {
info!(
"vsock: dropping empty data packet from guest (lp={}, pp={}",
self.local_port, self.peer_port
);
return Ok(());
}
// Unwrapping here is safe, since we just checked `pkt.buf()` above.
let buf_slice = &pkt.buf().unwrap()[..(pkt.len() as usize)];
if let Err(err) = self.send_bytes(buf_slice) {
// If we can't write to the host stream, that's an unrecoverable error, so
// we'll terminate this connection.
warn!(
"vsock: error writing to local stream (lp={}, pp={}): {:?}",
self.local_port, self.peer_port, err
);
self.kill();
return Ok(());
}
// We might've just consumed some data. If that's the case, we might need to
// update the peer on our buffer space situation, so that it can keep sending
// data packets our way.
if self.peer_needs_credit_update() {
self.pending_rx.insert(PendingRx::CreditUpdate);
}
}
// Next up: receiving a response / confirmation for a host-initiated connection.
// We'll move to an Established state, and pass on the good news through the host
// stream.
ConnState::LocalInit if pkt.op() == uapi::VSOCK_OP_RESPONSE => {
self.expiry = None;
self.state = ConnState::Established;
}
// The peer wants to shut down an established connection. If they have nothing
// more to send nor receive, and we don't have to wait to drain our TX buffer, we
// can schedule an RST packet (to terminate the connection on the next recv call).
// Otherwise, we'll arm the kill timer.
ConnState::Established if pkt.op() == uapi::VSOCK_OP_SHUTDOWN => {
let recv_off = pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV != 0;
let send_off = pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND != 0;
self.state = ConnState::PeerClosed(recv_off, send_off);
if recv_off && send_off {
if self.tx_buf.is_empty() {
self.pending_rx.insert(PendingRx::Rst);
} else {
self.expiry = Some(
Instant::now() + Duration::from_millis(defs::CONN_SHUTDOWN_TIMEOUT_MS),
);
}
}
}
// The peer wants to update a shutdown request, with more receive/send indications.
// The same logic as above applies.
ConnState::PeerClosed(ref mut recv_off, ref mut send_off)
if pkt.op() == uapi::VSOCK_OP_SHUTDOWN =>
{
*recv_off = *recv_off || (pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV != 0);
*send_off = *send_off || (pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND != 0);
if *recv_off && *send_off && self.tx_buf.is_empty() {
self.pending_rx.insert(PendingRx::Rst);
}
}
// A credit update from our peer is valid only in a state which allows data
// transfer towards the peer.
ConnState::Established | ConnState::PeerInit | ConnState::PeerClosed(false, _)
if pkt.op() == uapi::VSOCK_OP_CREDIT_UPDATE =>
{
// Nothing to do here; we've already updated peer credit.
}
// A credit request from our peer is valid only in a state which allows data
// transfer from the peer. We'll respond with a credit update packet.
ConnState::Established | ConnState::PeerInit | ConnState::PeerClosed(_, false)
if pkt.op() == uapi::VSOCK_OP_CREDIT_REQUEST =>
{
self.pending_rx.insert(PendingRx::CreditUpdate);
}
_ => {
debug!(
"vsock: dropping invalid TX pkt for connection: state={:?}, pkt.hdr={:?}",
self.state,
pkt.hdr()
);
}
};
Ok(())
}
/// Check if the connection has any pending packet addressed to the peer.
fn has_pending_rx(&self) -> bool {
!self.pending_rx.is_empty()
}
}
impl<S> AsRawFd for VsockConnection<S>
where
S: Read + Write + AsRawFd,
{
/// Get the file descriptor that this connection wants polled.
///
/// The connection is interested in being notified about EPOLLIN / EPOLLOUT events on the
/// host stream.
fn as_raw_fd(&self) -> RawFd {
self.stream.as_raw_fd()
}
}
impl<S> VsockEpollListener for VsockConnection<S>
where
S: Read + Write + AsRawFd,
{
/// Get the event set that this connection is interested in.
///
/// A connection will want to be notified when:
/// - data is available to be read from the host stream, so that it can store an RW pending
/// RX indication; and
/// - data can be written to the host stream, and the TX buffer needs to be flushed.
fn get_polled_evset(&self) -> EventSet {
let mut evset = EventSet::empty();
if !self.tx_buf.is_empty() {
// There's data waiting in the TX buffer, so we are interested in being notified
// when writing to the host stream wouldn't block.
evset.insert(EventSet::OUT);
}
// We're generally interested in being notified when data can be read from the host
// stream, unless we're in a state which doesn't allow moving data from host to guest.
match self.state {
ConnState::Killed | ConnState::LocalClosed | ConnState::PeerClosed(true, _) => (),
_ if self.need_credit_update_from_peer() => (),
_ => evset.insert(EventSet::IN),
}
evset
}
/// Notify the connection about an event (or set of events) that it was interested in.
fn notify(&mut self, evset: EventSet) {
if evset.contains(EventSet::IN) {
// Data can be read from the host stream. Setting a Rw pending indication, so that
// the muxer will know to call `recv_pkt()` later.
self.pending_rx.insert(PendingRx::Rw);
}
if evset.contains(EventSet::OUT) {
// Data can be written to the host stream. Time to flush out the TX buffer.
//
if self.tx_buf.is_empty() {
info!("vsock: connection received unexpected EPOLLOUT event");
return;
}
let flushed = self
.tx_buf
.flush_to(&mut self.stream)
.unwrap_or_else(|err| {
warn!(
"vsock: error flushing TX buf for (lp={}, pp={}): {:?}",
self.local_port, self.peer_port, err
);
self.kill();
0
});
self.fwd_cnt += Wrapping(flushed as u32);
// If this connection was shutting down, but is waiting to drain the TX buffer
// before forceful termination, the wait might be over.
if self.state == ConnState::PeerClosed(true, true) && self.tx_buf.is_empty() {
self.pending_rx.insert(PendingRx::Rst);
} else if self.peer_needs_credit_update() {
// If we've freed up some more buffer space, we may need to let the peer know it
// can safely send more data our way.
self.pending_rx.insert(PendingRx::CreditUpdate);
}
}
}
}
impl<S> VsockConnection<S>
where
S: Read + Write + AsRawFd,
{
/// Create a new guest-initiated connection object.
pub fn new_peer_init(
stream: S,
local_cid: u64,
peer_cid: u64,
local_port: u32,
peer_port: u32,
peer_buf_alloc: u32,
) -> Self {
Self {
local_cid,
peer_cid,
local_port,
peer_port,
stream,
state: ConnState::PeerInit,
tx_buf: TxBuf::new(),
fwd_cnt: Wrapping(0),
peer_buf_alloc,
peer_fwd_cnt: Wrapping(0),
rx_cnt: Wrapping(0),
last_fwd_cnt_to_peer: Wrapping(0),
pending_rx: PendingRxSet::from(PendingRx::Response),
expiry: None,
}
}
/// Create a new host-initiated connection object.
pub fn new_local_init(
stream: S,
local_cid: u64,
peer_cid: u64,
local_port: u32,
peer_port: u32,
) -> Self {
Self {
local_cid,
peer_cid,
local_port,
peer_port,
stream,
state: ConnState::LocalInit,
tx_buf: TxBuf::new(),
fwd_cnt: Wrapping(0),
peer_buf_alloc: 0,
peer_fwd_cnt: Wrapping(0),
rx_cnt: Wrapping(0),
last_fwd_cnt_to_peer: Wrapping(0),
pending_rx: PendingRxSet::from(PendingRx::Request),
expiry: None,
}
}
/// Check if there is an expiry (kill) timer set for this connection, sometime in the
/// future.
pub fn will_expire(&self) -> bool {
match self.expiry {
None => false,
Some(t) => t > Instant::now(),
}
}
/// Check if this connection needs to be scheduled for forceful termination, due to its
/// kill timer having expired.
pub fn has_expired(&self) -> bool {
match self.expiry {
None => false,
Some(t) => t <= Instant::now(),
}
}
/// Get the kill timer value, if one is set.
pub fn expiry(&self) -> Option<Instant> {
self.expiry
}
/// Schedule the connection to be forcefully terminated ASAP (i.e. the next time the
/// connection is asked to yield a packet, via `recv_pkt()`).
pub fn kill(&mut self) {
self.state = ConnState::Killed;
self.pending_rx.insert(PendingRx::Rst);
}
/// Send some raw data (a byte-slice) to the host stream.
///
/// Raw data can either be sent straight to the host stream, or to our TX buffer, if the
/// former fails.
pub fn send_bytes(&mut self, buf: &[u8]) -> Result<()> {
// If there is data in the TX buffer, that means we're already registered for EPOLLOUT
// events on the underlying stream. Therefore, there's no point in attempting a write
// at this point. `self.notify()` will get called when EPOLLOUT arrives, and it will
// attempt to drain the TX buffer then.
if !self.tx_buf.is_empty() {
return self.tx_buf.push(buf);
}
// The TX buffer is empty, so we can try to write straight to the host stream.
let written = match self.stream.write(buf) {
Ok(cnt) => cnt,
Err(e) => {
// Absorb any would-block errors, since we can always try again later.
if e.kind() == ErrorKind::WouldBlock {
0
} else {
// We don't know how to handle any other write error, so we'll send it up
// the call chain.
return Err(Error::StreamWrite(e));
}
}
};
// Move the "forwarded bytes" counter ahead by how much we were able to send out.
self.fwd_cnt += Wrapping(written as u32);
// If we couldn't write the whole slice, we'll need to push the remaining data to our
// buffer.
if written < buf.len() {
self.tx_buf.push(&buf[written..])?;
}
Ok(())
}
    /// Return the connection's state.
pub fn state(&self) -> ConnState {
self.state
}
/// Check if the credit information the peer has last received from us is outdated.
fn peer_needs_credit_update(&self) -> bool {
(self.fwd_cnt - self.last_fwd_cnt_to_peer).0 as usize >= defs::CONN_CREDIT_UPDATE_THRESHOLD
}
/// Check if we need to ask the peer for a credit update before sending any more data its
/// way.
fn need_credit_update_from_peer(&self) -> bool {
self.peer_avail_credit() == 0
}
/// Get the maximum number of bytes that we can send to our peer, without overflowing its
/// buffer.
fn peer_avail_credit(&self) -> usize {
(Wrapping(self.peer_buf_alloc as u32) - (self.rx_cnt - self.peer_fwd_cnt)).0 as usize
}
/// Prepare a packet header for transmission to our peer.
fn init_pkt<'a>(&self, pkt: &'a mut VsockPacket) -> &'a mut VsockPacket {
// Make sure the header is zeroed-out first.
// This looks sub-optimal, but it is actually optimized-out in the compiled code to be
// faster than a memset().
for b in pkt.hdr_mut() {
*b = 0;
}
pkt.set_src_cid(self.local_cid)
.set_dst_cid(self.peer_cid)
.set_src_port(self.local_port)
.set_dst_port(self.peer_port)
.set_type(uapi::VSOCK_TYPE_STREAM)
.set_buf_alloc(defs::CONN_TX_BUF_SIZE as u32)
.set_fwd_cnt(self.fwd_cnt.0)
}
}
#[cfg(test)]
mod tests {
use std::io::{Error as IoError, ErrorKind, Read, Result as IoResult, Write};
use std::os::unix::io::RawFd;
use std::time::{Duration, Instant};
use utils::eventfd::EventFd;
use super::super::super::defs::uapi;
use super::super::super::tests::TestContext;
use super::super::defs as csm_defs;
use super::*;
use crate::virtio::vsock::device::RXQ_INDEX;
const LOCAL_CID: u64 = 2;
const PEER_CID: u64 = 3;
const LOCAL_PORT: u32 = 1002;
const PEER_PORT: u32 = 1003;
const PEER_BUF_ALLOC: u32 = 64 * 1024;
enum StreamState {
Closed,
Error(ErrorKind),
Ready,
WouldBlock,
}
struct TestStream {
fd: EventFd,
read_buf: Vec<u8>,
read_state: StreamState,
write_buf: Vec<u8>,
write_state: StreamState,
}
impl TestStream {
fn new() -> Self {
Self {
fd: EventFd::new(libc::EFD_NONBLOCK).unwrap(),
read_state: StreamState::Ready,
write_state: StreamState::Ready,
read_buf: Vec::new(),
write_buf: Vec::new(),
}
}
fn new_with_read_buf(buf: &[u8]) -> Self {
let mut stream = Self::new();
stream.read_buf = buf.to_vec();
stream
}
}
impl AsRawFd for TestStream {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl Read for TestStream {
fn read(&mut self, data: &mut [u8]) -> IoResult<usize> {
match self.read_state {
StreamState::Closed => Ok(0),
StreamState::Error(kind) => Err(IoError::new(kind, "whatevs")),
StreamState::Ready => {
if self.read_buf.is_empty() {
return Err(IoError::new(ErrorKind::WouldBlock, "EAGAIN"));
}
let len = std::cmp::min(data.len(), self.read_buf.len());
assert_ne!(len, 0);
data[..len].copy_from_slice(&self.read_buf[..len]);
self.read_buf = self.read_buf.split_off(len);
Ok(len)
}
StreamState::WouldBlock => Err(IoError::new(ErrorKind::WouldBlock, "EAGAIN")),
}
}
}
impl Write for TestStream {
fn write(&mut self, data: &[u8]) -> IoResult<usize> {
match self.write_state {
StreamState::Closed => Err(IoError::new(ErrorKind::BrokenPipe, "EPIPE")),
StreamState::Error(kind) => Err(IoError::new(kind, "whatevs")),
StreamState::Ready => {
self.write_buf.extend_from_slice(data);
Ok(data.len())
}
StreamState::WouldBlock => Err(IoError::new(ErrorKind::WouldBlock, "EAGAIN")),
}
}
fn flush(&mut self) -> IoResult<()> {
Ok(())
}
}
fn init_pkt(pkt: &mut VsockPacket, op: u16, len: u32) -> &mut VsockPacket {
for b in pkt.hdr_mut() {
*b = 0;
}
pkt.set_src_cid(PEER_CID)
.set_dst_cid(LOCAL_CID)
.set_src_port(PEER_PORT)
.set_dst_port(LOCAL_PORT)
.set_type(uapi::VSOCK_TYPE_STREAM)
.set_buf_alloc(PEER_BUF_ALLOC)
.set_op(op)
.set_len(len)
}
// This is the connection state machine test context: a helper struct to provide CSM testing
// primitives. A single `VsockPacket` object will be enough for our testing needs. We'll be
// using it for simulating both packet sends and packet receives. We need to keep the vsock
// testing context alive, since `VsockPacket` is just a pointer-wrapper over some data that
// resides in guest memory. The vsock test context owns the `GuestMemoryMmap` object, so we'll make
// it a member here, in order to make sure that guest memory outlives our testing packet. A
// single `VsockConnection` object will also suffice for our testing needs. We'll be using a
// specially crafted `Read + Write + AsRawFd` object as a backing stream, so that we can
// control the various error conditions that might arise.
struct CsmTestContext {
_vsock_test_ctx: TestContext,
pkt: VsockPacket,
conn: VsockConnection<TestStream>,
}
impl CsmTestContext {
fn new_established() -> Self {
Self::new(ConnState::Established)
}
fn new(conn_state: ConnState) -> Self {
let vsock_test_ctx = TestContext::new();
let mut handler_ctx = vsock_test_ctx.create_event_handler_context();
let stream = TestStream::new();
let mut pkt = VsockPacket::from_rx_virtq_head(
&handler_ctx.device.queues[RXQ_INDEX]
.pop(&vsock_test_ctx.mem)
.unwrap(),
)
.unwrap();
let conn = match conn_state {
ConnState::PeerInit => VsockConnection::<TestStream>::new_peer_init(
stream,
LOCAL_CID,
PEER_CID,
LOCAL_PORT,
PEER_PORT,
PEER_BUF_ALLOC,
),
ConnState::LocalInit => VsockConnection::<TestStream>::new_local_init(
stream, LOCAL_CID, PEER_CID, LOCAL_PORT, PEER_PORT,
),
ConnState::Established => {
let mut conn = VsockConnection::<TestStream>::new_peer_init(
stream,
LOCAL_CID,
PEER_CID,
LOCAL_PORT,
PEER_PORT,
PEER_BUF_ALLOC,
);
assert!(conn.has_pending_rx());
conn.recv_pkt(&mut pkt).unwrap();
assert_eq!(pkt.op(), uapi::VSOCK_OP_RESPONSE);
conn
}
other => panic!("invalid ctx state: {:?}", other),
};
assert_eq!(conn.state, conn_state);
Self {
_vsock_test_ctx: vsock_test_ctx,
pkt,
conn,
}
}
fn set_stream(&mut self, stream: TestStream) {
self.conn.stream = stream;
}
fn set_peer_credit(&mut self, credit: u32) {
assert!(credit < self.conn.peer_buf_alloc);
self.conn.peer_fwd_cnt = Wrapping(0);
self.conn.rx_cnt = Wrapping(self.conn.peer_buf_alloc - credit);
assert_eq!(self.conn.peer_avail_credit(), credit as usize);
}
fn send(&mut self) {
self.conn.send_pkt(&self.pkt).unwrap();
}
fn recv(&mut self) {
self.conn.recv_pkt(&mut self.pkt).unwrap();
}
fn notify_epollin(&mut self) {
self.conn.notify(EventSet::IN);
assert!(self.conn.has_pending_rx());
}
fn notify_epollout(&mut self) {
self.conn.notify(EventSet::OUT);
}
fn init_pkt(&mut self, op: u16, len: u32) -> &mut VsockPacket {
init_pkt(&mut self.pkt, op, len)
}
fn init_data_pkt(&mut self, data: &[u8]) -> &VsockPacket {
assert!(data.len() <= self.pkt.buf().unwrap().len());
self.init_pkt(uapi::VSOCK_OP_RW, data.len() as u32);
self.pkt.buf_mut().unwrap()[..data.len()].copy_from_slice(data);
&self.pkt
}
}
#[test]
fn test_peer_request() {
let mut ctx = CsmTestContext::new(ConnState::PeerInit);
assert!(ctx.conn.has_pending_rx());
ctx.recv();
        // For peer-initiated requests, our connection should always yield a vsock response packet,
// in order to establish the connection.
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
assert_eq!(ctx.pkt.src_cid(), LOCAL_CID);
assert_eq!(ctx.pkt.dst_cid(), PEER_CID);
assert_eq!(ctx.pkt.src_port(), LOCAL_PORT);
assert_eq!(ctx.pkt.dst_port(), PEER_PORT);
assert_eq!(ctx.pkt.type_(), uapi::VSOCK_TYPE_STREAM);
assert_eq!(ctx.pkt.len(), 0);
// After yielding the response packet, the connection should have transitioned to the
// established state.
assert_eq!(ctx.conn.state, ConnState::Established);
}
#[test]
fn test_local_request() {
let mut ctx = CsmTestContext::new(ConnState::LocalInit);
// Host-initiated connections should first yield a connection request packet.
assert!(ctx.conn.has_pending_rx());
// Before yielding the connection request packet, the timeout kill timer shouldn't be
// armed.
assert!(!ctx.conn.will_expire());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_REQUEST);
// Since the request might time-out, the kill timer should now be armed.
assert!(ctx.conn.will_expire());
assert!(!ctx.conn.has_expired());
ctx.init_pkt(uapi::VSOCK_OP_RESPONSE, 0);
ctx.send();
// Upon receiving a connection response, the connection should have transitioned to the
// established state, and the kill timer should've been disarmed.
assert_eq!(ctx.conn.state, ConnState::Established);
assert!(!ctx.conn.will_expire());
}
#[test]
fn test_local_request_timeout() {
let mut ctx = CsmTestContext::new(ConnState::LocalInit);
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_REQUEST);
assert!(ctx.conn.will_expire());
assert!(!ctx.conn.has_expired());
std::thread::sleep(std::time::Duration::from_millis(
defs::CONN_REQUEST_TIMEOUT_MS,
));
assert!(ctx.conn.has_expired());
}
#[test]
fn test_rx_data() {
let mut ctx = CsmTestContext::new_established();
let data = &[1, 2, 3, 4];
ctx.set_stream(TestStream::new_with_read_buf(data));
assert_eq!(ctx.conn.as_raw_fd(), ctx.conn.stream.as_raw_fd());
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RW);
assert_eq!(ctx.pkt.len() as usize, data.len());
assert_eq!(ctx.pkt.buf().unwrap()[..ctx.pkt.len() as usize], *data);
// There's no more data in the stream, so `recv_pkt` should yield `VsockError::NoData`.
match ctx.conn.recv_pkt(&mut ctx.pkt) {
Err(VsockError::NoData) => (),
other => panic!("{:?}", other),
}
// A recv attempt in an invalid state should yield an instant reset packet.
ctx.conn.state = ConnState::LocalClosed;
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
#[test]
fn test_local_close() {
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.read_state = StreamState::Closed;
ctx.set_stream(stream);
ctx.notify_epollin();
ctx.recv();
        // When the host-side stream is closed, we can neither send nor receive any more data.
// Therefore, the vsock shutdown packet that we'll deliver to the guest must contain both
// the no-more-send and the no-more-recv indications.
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_SHUTDOWN);
assert_ne!(ctx.pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND, 0);
assert_ne!(ctx.pkt.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV, 0);
// The kill timer should now be armed.
assert!(ctx.conn.will_expire());
assert!(
ctx.conn.expiry().unwrap()
< Instant::now() + Duration::from_millis(defs::CONN_SHUTDOWN_TIMEOUT_MS)
);
}
#[test]
fn test_peer_close() {
// Test that send/recv shutdown indications are handled correctly.
// I.e. once set, an indication cannot be reset.
{
let mut ctx = CsmTestContext::new_established();
ctx.init_pkt(uapi::VSOCK_OP_SHUTDOWN, 0)
.set_flags(uapi::VSOCK_FLAGS_SHUTDOWN_RCV);
ctx.send();
assert_eq!(ctx.conn.state, ConnState::PeerClosed(true, false));
// Attempting to reset the no-more-recv indication should not work
// (we are only setting the no-more-send indication here).
ctx.pkt.set_flags(uapi::VSOCK_FLAGS_SHUTDOWN_SEND);
ctx.send();
assert_eq!(ctx.conn.state, ConnState::PeerClosed(true, true));
}
// Test case:
// - reading data from a no-more-send connection should work; and
// - writing data should have no effect.
{
let data = &[1, 2, 3, 4];
let mut ctx = CsmTestContext::new_established();
ctx.set_stream(TestStream::new_with_read_buf(data));
ctx.init_pkt(uapi::VSOCK_OP_SHUTDOWN, 0)
.set_flags(uapi::VSOCK_FLAGS_SHUTDOWN_SEND);
ctx.send();
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RW);
assert_eq!(&ctx.pkt.buf().unwrap()[..ctx.pkt.len() as usize], data);
ctx.init_data_pkt(data);
ctx.send();
assert_eq!(ctx.conn.stream.write_buf.len(), 0);
assert!(ctx.conn.tx_buf.is_empty());
}
// Test case:
// - writing data to a no-more-recv connection should work; and
// - attempting to read data from it should yield an RST packet.
{
let mut ctx = CsmTestContext::new_established();
ctx.init_pkt(uapi::VSOCK_OP_SHUTDOWN, 0)
.set_flags(uapi::VSOCK_FLAGS_SHUTDOWN_RCV);
ctx.send();
let data = &[1, 2, 3, 4];
ctx.init_data_pkt(data);
ctx.send();
assert_eq!(ctx.conn.stream.write_buf, data.to_vec());
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
// Test case: setting both no-more-send and no-more-recv indications should have the
// connection confirm termination (i.e. yield an RST).
{
let mut ctx = CsmTestContext::new_established();
ctx.init_pkt(uapi::VSOCK_OP_SHUTDOWN, 0)
.set_flags(uapi::VSOCK_FLAGS_SHUTDOWN_RCV | uapi::VSOCK_FLAGS_SHUTDOWN_SEND);
ctx.send();
assert!(ctx.conn.has_pending_rx());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
}
#[test]
fn test_local_read_error() {
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.read_state = StreamState::Error(ErrorKind::PermissionDenied);
ctx.set_stream(stream);
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
#[test]
fn test_credit_request_to_peer() {
let mut ctx = CsmTestContext::new_established();
ctx.set_peer_credit(0);
ctx.notify_epollin();
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_CREDIT_REQUEST);
}
#[test]
fn test_credit_request_from_peer() {
let mut ctx = CsmTestContext::new_established();
ctx.init_pkt(uapi::VSOCK_OP_CREDIT_REQUEST, 0);
ctx.send();
assert!(ctx.conn.has_pending_rx());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_CREDIT_UPDATE);
assert_eq!(ctx.pkt.buf_alloc(), csm_defs::CONN_TX_BUF_SIZE as u32);
assert_eq!(ctx.pkt.fwd_cnt(), ctx.conn.fwd_cnt.0);
}
#[test]
fn test_credit_update_to_peer() {
let mut ctx = CsmTestContext::new_established();
// Force a stale state, where the peer hasn't been updated on our credit situation.
ctx.conn.last_fwd_cnt_to_peer = Wrapping(0);
ctx.conn.fwd_cnt = Wrapping(csm_defs::CONN_CREDIT_UPDATE_THRESHOLD as u32);
// Fake a data send from the peer, to bring us over the credit update threshold.
let data = &[1, 2, 3, 4];
ctx.init_data_pkt(data);
ctx.send();
// The CSM should now have a credit update available for the peer.
assert!(ctx.conn.has_pending_rx());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_CREDIT_UPDATE);
assert_eq!(
ctx.pkt.fwd_cnt() as usize,
csm_defs::CONN_CREDIT_UPDATE_THRESHOLD + data.len()
);
assert_eq!(ctx.conn.fwd_cnt, ctx.conn.last_fwd_cnt_to_peer);
}
#[test]
fn test_tx_buffering() {
// Test case:
// - when writing to the backing stream would block, TX data should end up in the TX buf
// - when the CSM is notified that it can write to the backing stream, it should flush
// the TX buf.
{
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.write_state = StreamState::WouldBlock;
ctx.set_stream(stream);
// Send some data through the connection. The backing stream is set to reject writes,
// so the data should end up in the TX buffer.
let data = &[1, 2, 3, 4];
ctx.init_data_pkt(data);
ctx.send();
// When there's data in the TX buffer, the connection should ask to be notified when it
// can write to its backing stream.
assert!(ctx.conn.get_polled_evset().contains(EventSet::OUT));
assert_eq!(ctx.conn.tx_buf.len(), data.len());
            // Unlock the write stream and notify the connection it can now write its buffered
// data.
ctx.set_stream(TestStream::new());
ctx.conn.notify(EventSet::OUT);
assert!(ctx.conn.tx_buf.is_empty());
assert_eq!(ctx.conn.stream.write_buf, data);
}
}
#[test]
fn test_stream_write_error() {
// Test case: sending a data packet to a broken / closed backing stream should kill it.
{
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.write_state = StreamState::Closed;
ctx.set_stream(stream);
let data = &[1, 2, 3, 4];
ctx.init_data_pkt(data);
ctx.send();
assert_eq!(ctx.conn.state, ConnState::Killed);
assert!(ctx.conn.has_pending_rx());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
// Test case: notifying a connection that it can flush its TX buffer to a broken stream
// should kill the connection.
{
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.write_state = StreamState::WouldBlock;
ctx.set_stream(stream);
// Send some data through the connection. The backing stream is set to reject writes,
// so the data should end up in the TX buffer.
let data = &[1, 2, 3, 4];
ctx.init_data_pkt(data);
ctx.send();
// Set the backing stream to error out on write.
let mut stream = TestStream::new();
stream.write_state = StreamState::Closed;
ctx.set_stream(stream);
assert!(ctx.conn.get_polled_evset().contains(EventSet::OUT));
ctx.notify_epollout();
assert_eq!(ctx.conn.state, ConnState::Killed);
}
}
#[test]
fn test_peer_credit_misbehavior() {
let mut ctx = CsmTestContext::new_established();
let mut stream = TestStream::new();
stream.write_state = StreamState::WouldBlock;
ctx.set_stream(stream);
// Fill up the TX buffer.
let data = vec![0u8; ctx.pkt.buf().unwrap().len()];
ctx.init_data_pkt(data.as_slice());
for _i in 0..(csm_defs::CONN_TX_BUF_SIZE / data.len()) {
ctx.send();
}
// Then try to send more data.
ctx.send();
// The connection should've committed suicide.
assert_eq!(ctx.conn.state, ConnState::Killed);
assert!(ctx.conn.has_pending_rx());
ctx.recv();
assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RST);
}
}
| 41.230375 | 103 | 0.582199 |
d978beac5213ad1574e51e944aeb009fd54066fa | 551 | use super::super::format;
#[derive(Debug, Clone)]
pub struct RepositoryInfo {
pub last_commit_date: String,
pub stars: Option<i64>,
pub issues: Option<i64>,
}
pub fn fields(json: serde_json::value::Value) -> RepositoryInfo {
let mut last_commit_date = format::remove_quotes(json["updated_at"].to_string());
last_commit_date.truncate(10);
let stars = json["stargazers_count"].as_i64();
let issues = json["open_issues_count"].as_i64();
RepositoryInfo {
last_commit_date,
stars,
issues,
}
}
| 25.045455 | 85 | 0.667877 |
724df67196a92641859f9b4fadf1075dbf638435 | 2,694 | use mayastor::{
core::{
mayastor_env_stop,
MayastorCliArgs,
MayastorEnvironment,
Reactor,
UntypedBdev,
},
nexus_uri::bdev_create,
subsys::{NvmfSubsystem, SubType},
};
pub mod common;
static DISKNAME1: &str = "/tmp/disk1.img";
static BDEVNAME1: &str = "aio:///tmp/disk1.img?blk_size=512";
#[test]
fn nvmf_target() {
common::mayastor_test_init();
common::truncate_file(DISKNAME1, 64 * 1024);
let args = MayastorCliArgs {
reactor_mask: "0x3".into(),
..Default::default()
};
MayastorEnvironment::new(args)
.start(|| {
            // test that we can create an nvmf subsystem
Reactor::block_on(async {
let b = bdev_create(BDEVNAME1).await.unwrap();
let bdev = UntypedBdev::lookup_by_name(&b).unwrap();
let ss = NvmfSubsystem::try_from(&bdev).unwrap();
ss.start().await.unwrap();
});
            // test that we cannot create the same one again
Reactor::block_on(async {
let bdev = UntypedBdev::lookup_by_name(BDEVNAME1).unwrap();
let should_err = NvmfSubsystem::try_from(&bdev);
assert!(should_err.is_err());
});
// we should have at least 2 subsystems
Reactor::block_on(async {
assert_eq!(
NvmfSubsystem::first().unwrap().into_iter().count(),
2
);
});
// verify the bdev is claimed by our target -- make sure we skip
// over the discovery controller
Reactor::block_on(async {
let bdev = UntypedBdev::bdev_first().unwrap();
assert!(bdev.is_claimed());
assert_eq!(bdev.claimed_by().unwrap(), "NVMe-oF Target");
let ss = NvmfSubsystem::first().unwrap();
for s in ss {
if s.subtype() == SubType::Discovery {
continue;
}
s.stop().await.unwrap();
let sbdev = s.bdev().unwrap();
assert_eq!(sbdev.name(), bdev.name());
assert!(bdev.is_claimed());
assert_eq!(bdev.claimed_by().unwrap(), "NVMe-oF Target");
s.destroy();
assert!(!bdev.is_claimed());
assert_eq!(bdev.claimed_by(), None);
}
});
            // this should clean up / kill the discovery controller
mayastor_env_stop(0);
})
.unwrap();
common::delete_file(&[DISKNAME1.into()]);
}
| 32.071429 | 77 | 0.498515 |
26a174fe60a3d6790cf77416c4aeb029d60f22f0 | 501 | use rustc_serialize::hex::ToHex;
pub fn run() {
let input = String::from("Burning 'em, if you ain't quick and nimble\nI go crazy when I hear a cymbal");
let key = String::from("ICE");
let input_bytes = input.into_bytes();
let key_bytes = key.into_bytes();
let output = key_bytes
.iter()
.cycle()
.zip(input_bytes)
.map(|(byte1, byte2)| byte1 ^ byte2)
.collect::<Vec<u8>>()
.to_hex();
println!("Challenge 5 : {}", output);
}
| 23.857143 | 108 | 0.572854 |
9020bb7b0fb2ebd6fb0637c2ce76813f87d57bdf | 1,311 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(plugin_registrar)]
#![feature(box_syntax)]
extern crate syntax;
// Load rustc as a plugin to get macros
#[macro_use]
extern crate rustc;
use syntax::ast;
use syntax::parse::token;
use rustc::lint::{Context, LintPass, LintPassObject, LintArray};
use rustc::plugin::Registry;
declare_lint!(TEST_LINT, Warn, "Warn about items named 'lintme'");
struct Pass;
impl LintPass for Pass {
fn get_lints(&self) -> LintArray {
lint_array!(TEST_LINT)
}
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
let name = token::get_ident(it.ident);
if &name[] == "lintme" {
cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'");
}
}
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_lint_pass(box Pass as LintPassObject);
}
| 27.3125 | 71 | 0.692601 |
d7ac6745658ce015f122d093d7b8d3eda90614b1 | 588,160 | //! The `bank` module tracks client accounts and the progress of on-chain
//! programs.
//!
//! A single bank relates to a block produced by a single leader and each bank
//! except for the genesis bank points back to a parent bank.
//!
//! The bank is the main entrypoint for processing verified transactions with the function
//! `Bank::process_transactions`
//!
//! It does this by loading the accounts using the reference it holds on the account store,
//! and then passing those to an InvokeContext which handles loading the programs specified
//! by the Transaction and executing it.
//!
//! The bank then stores the results to the accounts store.
//!
//! It then has APIs for retrieving whether a transaction has been processed, and its status.
//! See `get_signature_status` et al.
//!
//! Bank lifecycle:
//!
//! A bank is newly created and open to transactions. Transactions are applied
//! until either the bank reaches the tick count when the node is the leader for that slot, or the
//! node has applied all transactions present in all `Entry`s in the slot.
//!
//! Once it is complete, the bank can then be frozen. Once frozen, no more transactions can
//! be applied and no more state changes can be made. At the freezing step, rent is applied and
//! various sysvar special accounts are updated to the new state of the system.
//!
//! Once frozen, and after the bank has received the appropriate number of votes, it can become
//! rooted. At that point, it can no longer be removed from the chain and its
//! state is finalized.
//!
//! It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
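//!
//! As an illustrative sketch only (hypothetical caller code; `parent`,
//! `collector_id`, `slot` and `transactions` are placeholders, not values
//! defined in this module):
//!
//! ```ignore
//! let bank = Bank::new_from_parent(&parent, &collector_id, slot);
//! let _results = bank.process_transactions(transactions.iter());
//! bank.freeze();
//! ```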
#[allow(deprecated)]
use solana_sdk::recent_blockhashes_account;
use {
crate::{
accounts::{AccountAddressFilter, Accounts, TransactionLoadResult},
accounts_db::{
AccountShrinkThreshold, AccountsDbConfig, ErrorCounters, SnapshotStorages,
ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING,
},
accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult},
accounts_update_notifier_interface::AccountsUpdateNotifier,
ancestors::{Ancestors, AncestorsForSerialization},
blockhash_queue::BlockhashQueue,
builtins::{self, ActivationType, Builtin, Builtins},
cost_tracker::CostTracker,
epoch_stakes::{EpochStakes, NodeVoteAccounts},
inline_spl_token,
message_processor::MessageProcessor,
rent_collector::RentCollector,
stake_weighted_timestamp::{
calculate_stake_weighted_timestamp, MaxAllowableDrift, MAX_ALLOWABLE_DRIFT_PERCENTAGE,
MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW,
},
stakes::{InvalidCacheEntryReason, Stakes, StakesCache},
status_cache::{SlotDelta, StatusCache},
system_instruction_processor::{get_system_account_kind, SystemAccountKind},
transaction_batch::TransactionBatch,
vote_account::VoteAccount,
},
byteorder::{ByteOrder, LittleEndian},
dashmap::DashMap,
itertools::Itertools,
log::*,
rayon::{
iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
ThreadPool, ThreadPoolBuilder,
},
solana_measure::measure::Measure,
solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
solana_program_runtime::{
instruction_recorder::InstructionRecorder,
invoke_context::{BuiltinProgram, Executor, Executors, ProcessInstructionWithContext},
log_collector::LogCollector,
timings::ExecuteDetailsTimings,
},
solana_sdk::{
account::{
create_account_shared_data_with_fields as create_account, from_account, Account,
AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount,
},
account_utils::StateMut,
clock::{
BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND,
INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES,
MAX_TRANSACTION_FORWARDING_DELAY, SECONDS_PER_DAY,
},
compute_budget::ComputeBudget,
ed25519_program,
epoch_info::EpochInfo,
epoch_schedule::EpochSchedule,
feature,
feature_set::{
self, disable_fee_calculator, nonce_must_be_writable, tx_wide_compute_cap, FeatureSet,
},
fee_calculator::{FeeCalculator, FeeRateGovernor},
genesis_config::{ClusterType, GenesisConfig},
hard_forks::HardForks,
hash::{extend_and_hash, hashv, Hash},
incinerator,
inflation::Inflation,
instruction::CompiledInstruction,
lamports::LamportsError,
message::SanitizedMessage,
native_loader,
native_token::sol_to_lamports,
nonce, nonce_account,
packet::PACKET_DATA_SIZE,
precompiles::get_precompiles,
program_utils::limited_deserialize,
pubkey::Pubkey,
secp256k1_program,
signature::{Keypair, Signature},
slot_hashes::SlotHashes,
slot_history::SlotHistory,
system_transaction,
sysvar::{self, Sysvar, SysvarId},
timing::years_as_slots,
transaction::{
Result, SanitizedTransaction, Transaction, TransactionError,
TransactionVerificationMode, VersionedTransaction,
},
transaction_context::{TransactionAccount, TransactionContext},
},
solana_stake_program::stake_state::{
self, InflationPointCalculationEvent, PointValue, StakeState,
},
solana_vote_program::{
vote_instruction::VoteInstruction,
vote_state::{VoteState, VoteStateVersions},
},
std::{
borrow::Cow,
cell::RefCell,
collections::{HashMap, HashSet},
convert::{TryFrom, TryInto},
fmt, mem,
ops::RangeInclusive,
path::PathBuf,
ptr,
rc::Rc,
sync::{
atomic::{
AtomicBool, AtomicU64,
Ordering::{Acquire, Relaxed, Release},
},
Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard,
},
time::{Duration, Instant},
},
};
pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;
#[derive(Clone, Debug, PartialEq)]
pub struct RentDebit {
rent_collected: u64,
post_balance: u64,
}
impl RentDebit {
fn try_into_reward_info(self) -> Option<RewardInfo> {
let rent_debit = i64::try_from(self.rent_collected)
.ok()
.and_then(|r| r.checked_neg());
rent_debit.map(|rent_debit| RewardInfo {
reward_type: RewardType::Rent,
lamports: rent_debit,
post_balance: self.post_balance,
commission: None, // Not applicable
})
}
}
#[derive(Clone, Debug, Default, PartialEq)]
pub struct RentDebits(HashMap<Pubkey, RentDebit>);
impl RentDebits {
fn get_account_rent_debit(&self, address: &Pubkey) -> u64 {
self.0
.get(address)
.map(|r| r.rent_collected)
.unwrap_or_default()
}
pub fn insert(&mut self, address: &Pubkey, rent_collected: u64, post_balance: u64) {
if rent_collected != 0 {
self.0.insert(
*address,
RentDebit {
rent_collected,
post_balance,
},
);
}
}
pub fn into_unordered_rewards_iter(self) -> impl Iterator<Item = (Pubkey, RewardInfo)> {
self.0
.into_iter()
.filter_map(|(address, rent_debit)| Some((address, rent_debit.try_into_reward_info()?)))
}
}
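// Illustrative sketch (added for exposition; not part of the original source):
// record rent debits as they are collected, then convert them into
// negative-lamport RewardInfo entries, as done for rewards reporting.
#[cfg(test)]
mod rent_debits_sketch {
    use super::*;

    #[test]
    fn records_and_converts_rent_debits() {
        let address = Pubkey::new_unique();
        let mut rent_debits = RentDebits::default();

        // Zero-rent insertions are ignored entirely.
        rent_debits.insert(&address, 0, 10);
        assert_eq!(rent_debits.get_account_rent_debit(&address), 0);

        // Non-zero rent is recorded and later surfaces as a negative reward.
        rent_debits.insert(&address, 4, 6);
        assert_eq!(rent_debits.get_account_rent_debit(&address), 4);

        let rewards: Vec<_> = rent_debits.into_unordered_rewards_iter().collect();
        assert_eq!(rewards.len(), 1);
        assert_eq!(rewards[0].1.lamports, -4);
        assert_eq!(rewards[0].1.post_balance, 6);
    }
}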
#[derive(Default, Debug)]
pub struct ExecuteTimings {
pub check_us: u64,
pub load_us: u64,
pub execute_us: u64,
pub store_us: u64,
pub update_stakes_cache_us: u64,
pub total_batches_len: usize,
pub num_execute_batches: u64,
pub details: ExecuteDetailsTimings,
}
impl ExecuteTimings {
pub fn accumulate(&mut self, other: &ExecuteTimings) {
self.check_us = self.check_us.saturating_add(other.check_us);
self.load_us = self.load_us.saturating_add(other.load_us);
self.execute_us = self.execute_us.saturating_add(other.execute_us);
self.store_us = self.store_us.saturating_add(other.store_us);
self.update_stakes_cache_us = self
.update_stakes_cache_us
.saturating_add(other.update_stakes_cache_us);
self.total_batches_len = self
.total_batches_len
.saturating_add(other.total_batches_len);
self.num_execute_batches = self
.num_execute_batches
.saturating_add(other.num_execute_batches);
self.details.accumulate(&other.details);
}
}
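// Illustrative sketch (added for exposition; not part of the original source):
// `accumulate` folds one ExecuteTimings into another with saturating adds, so
// per-batch timings can be summed into a running total without overflow panics.
#[cfg(test)]
mod execute_timings_sketch {
    use super::*;

    #[test]
    fn accumulate_sums_fields() {
        let mut total = ExecuteTimings::default();
        let mut batch = ExecuteTimings::default();
        batch.check_us = 5;
        batch.load_us = 7;
        batch.num_execute_batches = 1;

        total.accumulate(&batch);
        total.accumulate(&batch);

        assert_eq!(total.check_us, 10);
        assert_eq!(total.load_us, 14);
        assert_eq!(total.num_execute_batches, 2);
    }
}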
type BankStatusCache = StatusCache<Result<()>>;
#[frozen_abi(digest = "2pPboTQ9ixNuR1hvRt7McJriam5EHfd3vpBWfxnVbmF3")]
pub type BankSlotDelta = SlotDelta<Result<()>>;
// Eager rent collection repeats in a cyclic manner.
// Each cycle is composed of <partition_count> tiny pubkey subranges to scan,
// where <partition_count> is always a multiple of the number of slots in an epoch.
type PartitionIndex = u64;
type PartitionsPerCycle = u64;
type Partition = (PartitionIndex, PartitionIndex, PartitionsPerCycle);
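// Descriptive note (added): a Partition reads as (start_partition_index,
// end_partition_index, partition_count) -- i.e. which slice of the pubkey
// space to scan within a cycle of `partition_count` partitions (the field
// names here are illustrative).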
type RentCollectionCycleParams = (
Epoch,
SlotCount,
bool,
Epoch,
EpochCount,
PartitionsPerCycle,
);
pub struct SquashTiming {
pub squash_accounts_ms: u64,
pub squash_accounts_cache_ms: u64,
pub squash_accounts_index_ms: u64,
pub squash_accounts_store_ms: u64,
pub squash_cache_ms: u64,
}
type EpochCount = u64;
/// Copy-on-write holder of CachedExecutors
#[derive(AbiExample, Debug, Default)]
struct CowCachedExecutors {
shared: bool,
executors: Arc<RwLock<CachedExecutors>>,
}
impl Clone for CowCachedExecutors {
fn clone(&self) -> Self {
Self {
shared: true,
executors: self.executors.clone(),
}
}
}
impl CowCachedExecutors {
fn clone_with_epoch(&self, epoch: u64) -> Self {
let executors_raw = self.read().unwrap();
if executors_raw.current_epoch() == epoch {
self.clone()
} else {
Self {
shared: false,
executors: Arc::new(RwLock::new(executors_raw.clone_with_epoch(epoch))),
}
}
}
fn new(executors: Arc<RwLock<CachedExecutors>>) -> Self {
Self {
shared: true,
executors,
}
}
fn read(&self) -> LockResult<RwLockReadGuard<CachedExecutors>> {
self.executors.read()
}
fn write(&mut self) -> LockResult<RwLockWriteGuard<CachedExecutors>> {
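        // Copy-on-write: on the first write after a shared clone, deep-copy the
        // cache so this handle gets a private copy and other holders of the
        // original Arc are unaffected.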
if self.shared {
self.shared = false;
let local_cache = (*self.executors.read().unwrap()).clone();
self.executors = Arc::new(RwLock::new(local_cache));
}
self.executors.write()
}
}
const MAX_CACHED_EXECUTORS: usize = 100; // 10 MB assuming programs are around 100k
#[derive(Debug)]
struct CachedExecutorsEntry {
prev_epoch_count: u64,
epoch_count: AtomicU64,
executor: Arc<dyn Executor>,
}
/// LFU Cache of executors with single-epoch memory of usage counts
#[derive(Debug)]
struct CachedExecutors {
max: usize,
current_epoch: Epoch,
executors: HashMap<Pubkey, CachedExecutorsEntry>,
}
impl Default for CachedExecutors {
fn default() -> Self {
Self {
max: MAX_CACHED_EXECUTORS,
current_epoch: 0,
executors: HashMap::new(),
}
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for CachedExecutors {
fn example() -> Self {
        // Delegate the AbiExample impl to Default rather than recursing into
        // Arc<dyn Executor>, which cannot easily implement AbiExample due to
        // Rust's coherence rules.
        // This is safe because CachedExecutors isn't serializable by definition.
Self::default()
}
}
impl Clone for CachedExecutors {
fn clone(&self) -> Self {
self.clone_with_epoch(self.current_epoch)
}
}
impl CachedExecutors {
fn current_epoch(&self) -> Epoch {
self.current_epoch
}
fn clone_with_epoch(&self, epoch: Epoch) -> Self {
let mut executors = HashMap::new();
for (key, entry) in self.executors.iter() {
// The total_count = prev_epoch_count + epoch_count will be used for LFU eviction.
// If the epoch has changed, we store the prev_epoch_count and reset the epoch_count to 0.
if epoch > self.current_epoch {
executors.insert(
*key,
CachedExecutorsEntry {
prev_epoch_count: entry.epoch_count.load(Relaxed),
epoch_count: AtomicU64::new(0),
executor: entry.executor.clone(),
},
);
} else {
executors.insert(
*key,
CachedExecutorsEntry {
prev_epoch_count: entry.prev_epoch_count,
epoch_count: AtomicU64::new(entry.epoch_count.load(Relaxed)),
executor: entry.executor.clone(),
},
);
}
}
Self {
max: self.max,
current_epoch: epoch,
executors,
}
}
fn new(max: usize, current_epoch: Epoch) -> Self {
Self {
max,
current_epoch,
executors: HashMap::new(),
}
}
fn get(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> {
self.executors.get(pubkey).map(|entry| {
entry.epoch_count.fetch_add(1, Relaxed);
entry.executor.clone()
})
}
fn put(&mut self, pubkey: &Pubkey, executor: Arc<dyn Executor>) {
if !self.executors.contains_key(pubkey) && self.executors.len() >= self.max {
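            // The cache is full and the key is new: evict the entry with the
            // lowest combined usage count (prev_epoch_count + epoch_count),
            // i.e. the least-frequently-used executor.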
let mut least = u64::MAX;
let default_key = Pubkey::default();
let mut least_key = &default_key;
for (key, entry) in self.executors.iter() {
let count = entry.prev_epoch_count + entry.epoch_count.load(Relaxed);
if count < least {
least = count;
least_key = key;
}
}
let least_key = *least_key;
let _ = self.executors.remove(&least_key);
}
let _ = self.executors.insert(
*pubkey,
CachedExecutorsEntry {
prev_epoch_count: 0,
epoch_count: AtomicU64::new(0),
executor,
},
);
}
fn remove(&mut self, pubkey: &Pubkey) {
let _ = self.executors.remove(pubkey);
}
}
#[derive(Debug)]
pub struct BankRc {
/// where all the Accounts are stored
pub accounts: Arc<Accounts>,
/// Previous checkpoint of this bank
pub(crate) parent: RwLock<Option<Arc<Bank>>>,
/// Current slot
pub(crate) slot: Slot,
pub(crate) bank_id_generator: Arc<AtomicU64>,
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::AbiExample;
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for BankRc {
fn example() -> Self {
BankRc {
// Set parent to None to cut the recursion into another Bank
parent: RwLock::new(None),
// AbiExample for Accounts is specially implemented to contain a storage example
accounts: AbiExample::example(),
slot: AbiExample::example(),
bank_id_generator: Arc::new(AtomicU64::new(0)),
}
}
}
impl BankRc {
pub(crate) fn new(accounts: Accounts, slot: Slot) -> Self {
Self {
accounts: Arc::new(accounts),
parent: RwLock::new(None),
slot,
bank_id_generator: Arc::new(AtomicU64::new(0)),
}
}
}
#[derive(Default, Debug, AbiExample)]
pub struct StatusCacheRc {
    /// A cache of signature statuses
pub status_cache: Arc<RwLock<BankStatusCache>>,
}
impl StatusCacheRc {
pub fn slot_deltas(&self, slots: &[Slot]) -> Vec<BankSlotDelta> {
let sc = self.status_cache.read().unwrap();
sc.slot_deltas(slots)
}
pub fn roots(&self) -> Vec<Slot> {
self.status_cache
.read()
.unwrap()
.roots()
.iter()
.cloned()
.sorted()
.collect()
}
pub fn append(&self, slot_deltas: &[BankSlotDelta]) {
let mut sc = self.status_cache.write().unwrap();
sc.append(slot_deltas);
}
}
pub type TransactionCheckResult = (Result<()>, Option<NoncePartial>);
pub type TransactionExecutionResult = (Result<()>, Option<NonceFull>);
pub struct TransactionResults {
pub fee_collection_results: Vec<Result<()>>,
pub execution_results: Vec<TransactionExecutionResult>,
pub rent_debits: Vec<RentDebits>,
}
pub struct TransactionSimulationResult {
pub result: Result<()>,
pub logs: TransactionLogMessages,
pub post_simulation_accounts: Vec<TransactionAccount>,
pub units_consumed: u64,
}
pub struct TransactionBalancesSet {
pub pre_balances: TransactionBalances,
pub post_balances: TransactionBalances,
}
impl TransactionBalancesSet {
pub fn new(pre_balances: TransactionBalances, post_balances: TransactionBalances) -> Self {
assert_eq!(pre_balances.len(), post_balances.len());
Self {
pre_balances,
post_balances,
}
}
}
pub type TransactionBalances = Vec<Vec<u64>>;
/// An ordered list of instructions that were invoked during a transaction instruction
pub type InnerInstructions = Vec<CompiledInstruction>;
/// A list of instructions that were invoked during each instruction of a transaction
pub type InnerInstructionsList = Vec<InnerInstructions>;
/// A list of log messages emitted during a transaction
pub type TransactionLogMessages = Vec<String>;
#[derive(Serialize, Deserialize, AbiExample, AbiEnumVisitor, Debug, PartialEq)]
pub enum TransactionLogCollectorFilter {
All,
AllWithVotes,
None,
OnlyMentionedAddresses,
}
impl Default for TransactionLogCollectorFilter {
fn default() -> Self {
Self::None
}
}
#[derive(AbiExample, Debug, Default)]
pub struct TransactionLogCollectorConfig {
pub mentioned_addresses: HashSet<Pubkey>,
pub filter: TransactionLogCollectorFilter,
}
#[derive(AbiExample, Clone, Debug, PartialEq)]
pub struct TransactionLogInfo {
pub signature: Signature,
pub result: Result<()>,
pub is_vote: bool,
pub log_messages: TransactionLogMessages,
}
#[derive(AbiExample, Default, Debug)]
pub struct TransactionLogCollector {
    // All the logs collected from this Bank. Exact contents depend on the
    // active `TransactionLogCollectorFilter`.
pub logs: Vec<TransactionLogInfo>,
// For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
// locate the logs from transactions that included the mentioned addresses.
pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
}
impl TransactionLogCollector {
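    /// Returns all collected logs when `address` is `None`; otherwise returns
    /// only the logs from transactions that mentioned `address`.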
pub fn get_logs_for_address(
&self,
address: Option<&Pubkey>,
) -> Option<Vec<TransactionLogInfo>> {
match address {
None => Some(self.logs.clone()),
Some(address) => self.mentioned_address_map.get(address).map(|log_indices| {
log_indices
.iter()
.filter_map(|i| self.logs.get(*i).cloned())
.collect()
}),
}
}
}
pub trait NonceInfo {
fn address(&self) -> &Pubkey;
fn account(&self) -> &AccountSharedData;
fn lamports_per_signature(&self) -> Option<u64>;
fn fee_payer_account(&self) -> Option<&AccountSharedData>;
}
/// Holds limited nonce info available during transaction checks
#[derive(Clone, Debug, Default, PartialEq)]
pub struct NoncePartial {
address: Pubkey,
account: AccountSharedData,
}
impl NoncePartial {
pub fn new(address: Pubkey, account: AccountSharedData) -> Self {
Self { address, account }
}
}
impl NonceInfo for NoncePartial {
fn address(&self) -> &Pubkey {
&self.address
}
fn account(&self) -> &AccountSharedData {
&self.account
}
fn lamports_per_signature(&self) -> Option<u64> {
nonce_account::lamports_per_signature_of(&self.account)
}
fn fee_payer_account(&self) -> Option<&AccountSharedData> {
None
}
}
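// Illustrative sketch (added for exposition; not part of the original source):
// NoncePartial carries only the nonce account itself; the fee payer is
// resolved later, when a NonceFull is built via NonceFull::from_partial.
#[cfg(test)]
mod nonce_partial_sketch {
    use super::*;

    #[test]
    fn partial_has_no_fee_payer() {
        let address = Pubkey::new_unique();
        let partial = NoncePartial::new(address, AccountSharedData::default());
        assert_eq!(partial.address(), &address);
        // A default (non-nonce) account carries no fee rate...
        assert_eq!(partial.lamports_per_signature(), None);
        // ...and the partial form never has a fee-payer account.
        assert!(partial.fee_payer_account().is_none());
    }
}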
/// Holds fee subtracted nonce info
#[derive(Clone, Debug, Default, PartialEq)]
pub struct NonceFull {
address: Pubkey,
account: AccountSharedData,
fee_payer_account: Option<AccountSharedData>,
}
impl NonceFull {
pub fn new(
address: Pubkey,
account: AccountSharedData,
fee_payer_account: Option<AccountSharedData>,
) -> Self {
Self {
address,
account,
fee_payer_account,
}
}
pub fn from_partial(
partial: NoncePartial,
message: &SanitizedMessage,
accounts: &[TransactionAccount],
rent_debits: &RentDebits,
) -> Result<Self> {
let fee_payer = (0..message.account_keys_len()).find_map(|i| {
if let Some((k, a)) = &accounts.get(i) {
if message.is_non_loader_key(i) {
return Some((k, a));
}
}
None
});
if let Some((fee_payer_address, fee_payer_account)) = fee_payer {
let mut fee_payer_account = fee_payer_account.clone();
let rent_debit = rent_debits.get_account_rent_debit(fee_payer_address);
fee_payer_account.set_lamports(fee_payer_account.lamports().saturating_add(rent_debit));
let nonce_address = *partial.address();
if *fee_payer_address == nonce_address {
Ok(Self::new(nonce_address, fee_payer_account, None))
} else {
Ok(Self::new(
nonce_address,
partial.account().clone(),
Some(fee_payer_account),
))
}
} else {
Err(TransactionError::AccountNotFound)
}
}
}
impl NonceInfo for NonceFull {
fn address(&self) -> &Pubkey {
&self.address
}
fn account(&self) -> &AccountSharedData {
&self.account
}
fn lamports_per_signature(&self) -> Option<u64> {
nonce_account::lamports_per_signature_of(&self.account)
}
fn fee_payer_account(&self) -> Option<&AccountSharedData> {
self.fee_payer_account.as_ref()
}
}
// Bank's common fields shared by all supported snapshot versions for deserialization.
// Sync fields with BankFieldsToSerialize! This is paired with it.
// All members are made public so that Bank's own members can stay private and
// the versioned deserializer can operate on this struct.
#[derive(Clone, Debug, Default)]
pub(crate) struct BankFieldsToDeserialize {
pub(crate) blockhash_queue: BlockhashQueue,
pub(crate) ancestors: AncestorsForSerialization,
pub(crate) hash: Hash,
pub(crate) parent_hash: Hash,
pub(crate) parent_slot: Slot,
pub(crate) hard_forks: HardForks,
pub(crate) transaction_count: u64,
pub(crate) tick_height: u64,
pub(crate) signature_count: u64,
pub(crate) capitalization: u64,
pub(crate) max_tick_height: u64,
pub(crate) hashes_per_tick: Option<u64>,
pub(crate) ticks_per_slot: u64,
pub(crate) ns_per_slot: u128,
pub(crate) genesis_creation_time: UnixTimestamp,
pub(crate) slots_per_year: f64,
#[allow(dead_code)]
pub(crate) unused: u64,
pub(crate) slot: Slot,
pub(crate) epoch: Epoch,
pub(crate) block_height: u64,
pub(crate) collector_id: Pubkey,
pub(crate) collector_fees: u64,
pub(crate) fee_calculator: FeeCalculator,
pub(crate) fee_rate_governor: FeeRateGovernor,
pub(crate) collected_rent: u64,
pub(crate) rent_collector: RentCollector,
pub(crate) epoch_schedule: EpochSchedule,
pub(crate) inflation: Inflation,
pub(crate) stakes: Stakes,
pub(crate) epoch_stakes: HashMap<Epoch, EpochStakes>,
pub(crate) is_delta: bool,
}
// Bank's common fields shared by all supported snapshot versions for serialization.
// This is separated from BankFieldsToDeserialize to avoid cloning by using refs.
// So, sync fields with BankFieldsToDeserialize!
// All members are made public so that Bank's own members can stay private and
// the versioned serializer can operate on this struct.
#[derive(Debug)]
pub(crate) struct BankFieldsToSerialize<'a> {
pub(crate) blockhash_queue: &'a RwLock<BlockhashQueue>,
pub(crate) ancestors: &'a AncestorsForSerialization,
pub(crate) hash: Hash,
pub(crate) parent_hash: Hash,
pub(crate) parent_slot: Slot,
pub(crate) hard_forks: &'a RwLock<HardForks>,
pub(crate) transaction_count: u64,
pub(crate) tick_height: u64,
pub(crate) signature_count: u64,
pub(crate) capitalization: u64,
pub(crate) max_tick_height: u64,
pub(crate) hashes_per_tick: Option<u64>,
pub(crate) ticks_per_slot: u64,
pub(crate) ns_per_slot: u128,
pub(crate) genesis_creation_time: UnixTimestamp,
pub(crate) slots_per_year: f64,
pub(crate) unused: u64,
pub(crate) slot: Slot,
pub(crate) epoch: Epoch,
pub(crate) block_height: u64,
pub(crate) collector_id: Pubkey,
pub(crate) collector_fees: u64,
pub(crate) fee_calculator: FeeCalculator,
pub(crate) fee_rate_governor: FeeRateGovernor,
pub(crate) collected_rent: u64,
pub(crate) rent_collector: RentCollector,
pub(crate) epoch_schedule: EpochSchedule,
pub(crate) inflation: Inflation,
pub(crate) stakes: &'a StakesCache,
pub(crate) epoch_stakes: &'a HashMap<Epoch, EpochStakes>,
pub(crate) is_delta: bool,
}
// Can't derive PartialEq because RwLock doesn't implement PartialEq
impl PartialEq for Bank {
fn eq(&self, other: &Self) -> bool {
if ptr::eq(self, other) {
return true;
}
*self.blockhash_queue.read().unwrap() == *other.blockhash_queue.read().unwrap()
&& self.ancestors == other.ancestors
&& *self.hash.read().unwrap() == *other.hash.read().unwrap()
&& self.parent_hash == other.parent_hash
&& self.parent_slot == other.parent_slot
&& *self.hard_forks.read().unwrap() == *other.hard_forks.read().unwrap()
&& self.transaction_count.load(Relaxed) == other.transaction_count.load(Relaxed)
&& self.tick_height.load(Relaxed) == other.tick_height.load(Relaxed)
&& self.signature_count.load(Relaxed) == other.signature_count.load(Relaxed)
&& self.capitalization.load(Relaxed) == other.capitalization.load(Relaxed)
&& self.max_tick_height == other.max_tick_height
&& self.hashes_per_tick == other.hashes_per_tick
&& self.ticks_per_slot == other.ticks_per_slot
&& self.ns_per_slot == other.ns_per_slot
&& self.genesis_creation_time == other.genesis_creation_time
&& self.slots_per_year == other.slots_per_year
&& self.unused == other.unused
&& self.slot == other.slot
&& self.epoch == other.epoch
&& self.block_height == other.block_height
&& self.collector_id == other.collector_id
&& self.collector_fees.load(Relaxed) == other.collector_fees.load(Relaxed)
&& self.fee_calculator == other.fee_calculator
&& self.fee_rate_governor == other.fee_rate_governor
&& self.collected_rent.load(Relaxed) == other.collected_rent.load(Relaxed)
&& self.rent_collector == other.rent_collector
&& self.epoch_schedule == other.epoch_schedule
&& *self.inflation.read().unwrap() == *other.inflation.read().unwrap()
&& *self.stakes_cache.stakes() == *other.stakes_cache.stakes()
&& self.epoch_stakes == other.epoch_stakes
&& self.is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize, AbiExample, AbiEnumVisitor, Clone, Copy)]
pub enum RewardType {
Fee,
Rent,
Staking,
Voting,
}
#[derive(Debug)]
pub enum RewardCalculationEvent<'a, 'b> {
Staking(&'a Pubkey, &'b InflationPointCalculationEvent),
}
fn null_tracer() -> Option<impl Fn(&RewardCalculationEvent) + Send + Sync> {
None::<fn(&RewardCalculationEvent)>
}
impl fmt::Display for RewardType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
RewardType::Fee => "fee",
RewardType::Rent => "rent",
RewardType::Staking => "staking",
RewardType::Voting => "voting",
}
)
}
}
pub trait DropCallback: fmt::Debug {
fn callback(&self, b: &Bank);
fn clone_box(&self) -> Box<dyn DropCallback + Send + Sync>;
}
#[derive(Debug, PartialEq, Serialize, Deserialize, AbiExample, Clone, Copy)]
pub struct RewardInfo {
pub reward_type: RewardType,
pub lamports: i64, // Reward amount
pub post_balance: u64, // Account balance in lamports after `lamports` was applied
pub commission: Option<u8>, // Vote account commission when the reward was credited, only present for voting and staking rewards
}
#[derive(Debug, Default)]
pub struct OptionalDropCallback(Option<Box<dyn DropCallback + Send + Sync>>);
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for OptionalDropCallback {
fn example() -> Self {
Self(None)
}
}
#[derive(Debug, Clone, Default)]
pub struct BuiltinPrograms {
pub vec: Vec<BuiltinProgram>,
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl AbiExample for BuiltinPrograms {
fn example() -> Self {
Self::default()
}
}
/// Manager for the state of all accounts and programs after processing its entries.
/// AbiExample is needed even without Serialize/Deserialize; the actual
/// (de-)serialization is implemented elsewhere for versioning.
#[derive(AbiExample, Debug)]
pub struct Bank {
/// References to accounts, parent and signature status
pub rc: BankRc,
pub src: StatusCacheRc,
/// FIFO queue of `recent_blockhash` items
blockhash_queue: RwLock<BlockhashQueue>,
/// The set of parents including this bank
pub ancestors: Ancestors,
/// Hash of this Bank's state. Only meaningful after freezing.
hash: RwLock<Hash>,
/// Hash of this Bank's parent's state
parent_hash: Hash,
/// parent's slot
parent_slot: Slot,
/// slots to hard fork at
hard_forks: Arc<RwLock<HardForks>>,
/// The number of transactions processed without error
transaction_count: AtomicU64,
/// The number of transaction errors in this slot
transaction_error_count: AtomicU64,
/// The number of transaction entries in this slot
transaction_entries_count: AtomicU64,
    /// The max number of transactions in an entry in this slot
transactions_per_entry_max: AtomicU64,
/// Bank tick height
tick_height: AtomicU64,
/// The number of signatures from valid transactions in this slot
signature_count: AtomicU64,
/// Total capitalization, used to calculate inflation
capitalization: AtomicU64,
    /// Bank max_tick_height
max_tick_height: u64,
/// The number of hashes in each tick. None value means hashing is disabled.
hashes_per_tick: Option<u64>,
/// The number of ticks in each slot.
ticks_per_slot: u64,
/// length of a slot in ns
pub ns_per_slot: u128,
/// genesis time, used for computed clock
genesis_creation_time: UnixTimestamp,
/// The number of slots per year, used for inflation
slots_per_year: f64,
/// Unused
unused: u64,
/// Bank slot (i.e. block)
slot: Slot,
bank_id: BankId,
/// Bank epoch
epoch: Epoch,
/// Bank block_height
block_height: u64,
/// The pubkey to send transactions fees to.
collector_id: Pubkey,
/// Fees that have been collected
collector_fees: AtomicU64,
/// Deprecated, do not use
/// Latest transaction fees for transactions processed by this bank
fee_calculator: FeeCalculator,
/// Track cluster signature throughput and adjust fee rate
fee_rate_governor: FeeRateGovernor,
/// Rent that has been collected
collected_rent: AtomicU64,
/// latest rent collector, knows the epoch
rent_collector: RentCollector,
/// initialized from genesis
epoch_schedule: EpochSchedule,
/// inflation specs
inflation: Arc<RwLock<Inflation>>,
/// cache of vote_account and stake_account state for this fork
stakes_cache: StakesCache,
/// staked nodes on epoch boundaries, saved off when a bank.slot() is at
/// a leader schedule calculation boundary
epoch_stakes: HashMap<Epoch, EpochStakes>,
/// A boolean reflecting whether any entries were recorded into the PoH
/// stream for the slot == self.slot
is_delta: AtomicBool,
/// The builtin programs
builtin_programs: BuiltinPrograms,
compute_budget: Option<ComputeBudget>,
/// Builtin programs activated dynamically by feature
#[allow(clippy::rc_buffer)]
feature_builtins: Arc<Vec<(Builtin, Pubkey, ActivationType)>>,
/// Protocol-level rewards that were distributed by this bank
pub rewards: RwLock<Vec<(Pubkey, RewardInfo)>>,
pub cluster_type: Option<ClusterType>,
pub lazy_rent_collection: AtomicBool,
    // this is a temporary field, kept only until rewards_pool is removed entirely
pub rewards_pool_pubkeys: Arc<HashSet<Pubkey>>,
/// Cached executors
cached_executors: RwLock<CowCachedExecutors>,
transaction_debug_keys: Option<Arc<HashSet<Pubkey>>>,
// Global configuration for how transaction logs should be collected across all banks
pub transaction_log_collector_config: Arc<RwLock<TransactionLogCollectorConfig>>,
// Logs from transactions that this Bank executed collected according to the criteria in
// `transaction_log_collector_config`
pub transaction_log_collector: Arc<RwLock<TransactionLogCollector>>,
pub feature_set: Arc<FeatureSet>,
pub drop_callback: RwLock<OptionalDropCallback>,
pub freeze_started: AtomicBool,
vote_only_bank: bool,
pub cost_tracker: RwLock<CostTracker>,
sysvar_cache: RwLock<Vec<(Pubkey, Vec<u8>)>>,
/// Current size of the accounts data. Used when processing messages to enforce a limit on its
/// maximum size.
accounts_data_len: AtomicU64,
}
impl Default for BlockhashQueue {
fn default() -> Self {
Self::new(MAX_RECENT_BLOCKHASHES)
}
}
struct VoteWithStakeDelegations {
vote_state: Arc<VoteState>,
vote_account: AccountSharedData,
delegations: Vec<(Pubkey, (StakeState, AccountSharedData))>,
}
struct LoadVoteAndStakeAccountsResult {
vote_with_stake_delegations_map: DashMap<Pubkey, VoteWithStakeDelegations>,
invalid_stake_keys: DashMap<Pubkey, InvalidCacheEntryReason>,
invalid_vote_keys: DashMap<Pubkey, InvalidCacheEntryReason>,
}
#[derive(Debug, Default)]
pub struct NewBankOptions {
pub vote_only_bank: bool,
}
impl Bank {
pub fn default_for_tests() -> Self {
Self::default_with_accounts(Accounts::default_for_tests())
}
pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self {
// this will diverge
Self::new_for_tests(genesis_config)
}
pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self {
// this will diverge
Self::new_with_paths_for_tests(
genesis_config,
Vec::new(),
None,
None,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
false,
)
}
pub fn new_no_wallclock_throttle_for_tests(genesis_config: &GenesisConfig) -> Self {
let mut bank = Self::new_with_paths_for_tests(
genesis_config,
Vec::new(),
None,
None,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
false,
);
bank.ns_per_slot = std::u128::MAX;
bank
}
#[cfg(test)]
pub(crate) fn new_with_config(
genesis_config: &GenesisConfig,
account_indexes: AccountSecondaryIndexes,
accounts_db_caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
) -> Self {
Self::new_with_paths_for_tests(
genesis_config,
Vec::new(),
None,
None,
account_indexes,
accounts_db_caching_enabled,
shrink_ratio,
false,
)
}
fn default_with_accounts(accounts: Accounts) -> Self {
let bank = Self {
rc: BankRc::new(accounts, Slot::default()),
src: StatusCacheRc::default(),
blockhash_queue: RwLock::<BlockhashQueue>::default(),
ancestors: Ancestors::default(),
hash: RwLock::<Hash>::default(),
parent_hash: Hash::default(),
parent_slot: Slot::default(),
hard_forks: Arc::<RwLock<HardForks>>::default(),
transaction_count: AtomicU64::default(),
transaction_error_count: AtomicU64::default(),
transaction_entries_count: AtomicU64::default(),
transactions_per_entry_max: AtomicU64::default(),
tick_height: AtomicU64::default(),
signature_count: AtomicU64::default(),
capitalization: AtomicU64::default(),
max_tick_height: u64::default(),
hashes_per_tick: Option::<u64>::default(),
ticks_per_slot: u64::default(),
ns_per_slot: u128::default(),
genesis_creation_time: UnixTimestamp::default(),
slots_per_year: f64::default(),
unused: u64::default(),
slot: Slot::default(),
bank_id: BankId::default(),
epoch: Epoch::default(),
block_height: u64::default(),
collector_id: Pubkey::default(),
collector_fees: AtomicU64::default(),
fee_calculator: FeeCalculator::default(),
fee_rate_governor: FeeRateGovernor::default(),
collected_rent: AtomicU64::default(),
rent_collector: RentCollector::default(),
epoch_schedule: EpochSchedule::default(),
inflation: Arc::<RwLock<Inflation>>::default(),
stakes_cache: StakesCache::default(),
epoch_stakes: HashMap::<Epoch, EpochStakes>::default(),
is_delta: AtomicBool::default(),
builtin_programs: BuiltinPrograms::default(),
compute_budget: Option::<ComputeBudget>::default(),
feature_builtins: Arc::<Vec<(Builtin, Pubkey, ActivationType)>>::default(),
rewards: RwLock::<Vec<(Pubkey, RewardInfo)>>::default(),
cluster_type: Option::<ClusterType>::default(),
lazy_rent_collection: AtomicBool::default(),
rewards_pool_pubkeys: Arc::<HashSet<Pubkey>>::default(),
cached_executors: RwLock::<CowCachedExecutors>::default(),
transaction_debug_keys: Option::<Arc<HashSet<Pubkey>>>::default(),
transaction_log_collector_config: Arc::<RwLock<TransactionLogCollectorConfig>>::default(
),
transaction_log_collector: Arc::<RwLock<TransactionLogCollector>>::default(),
feature_set: Arc::<FeatureSet>::default(),
drop_callback: RwLock::<OptionalDropCallback>::default(),
freeze_started: AtomicBool::default(),
vote_only_bank: false,
cost_tracker: RwLock::<CostTracker>::default(),
sysvar_cache: RwLock::new(Vec::new()),
accounts_data_len: AtomicU64::default(),
};
let total_accounts_stats = bank.get_total_accounts_stats().unwrap();
bank.store_accounts_data_len(total_accounts_stats.data_len as u64);
bank
}
pub fn new_with_paths_for_tests(
genesis_config: &GenesisConfig,
paths: Vec<PathBuf>,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: AccountSecondaryIndexes,
accounts_db_caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
debug_do_not_add_builtins: bool,
) -> Self {
Self::new_with_paths(
genesis_config,
paths,
debug_keys,
additional_builtins,
account_indexes,
accounts_db_caching_enabled,
shrink_ratio,
debug_do_not_add_builtins,
Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
None,
)
}
pub fn new_with_paths_for_benches(
genesis_config: &GenesisConfig,
paths: Vec<PathBuf>,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: AccountSecondaryIndexes,
accounts_db_caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
debug_do_not_add_builtins: bool,
) -> Self {
Self::new_with_paths(
genesis_config,
paths,
debug_keys,
additional_builtins,
account_indexes,
accounts_db_caching_enabled,
shrink_ratio,
debug_do_not_add_builtins,
Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
None,
)
}
#[allow(clippy::too_many_arguments)]
pub fn new_with_paths(
genesis_config: &GenesisConfig,
paths: Vec<PathBuf>,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_indexes: AccountSecondaryIndexes,
accounts_db_caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
debug_do_not_add_builtins: bool,
accounts_db_config: Option<AccountsDbConfig>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> Self {
let accounts = Accounts::new_with_config(
paths,
&genesis_config.cluster_type,
account_indexes,
accounts_db_caching_enabled,
shrink_ratio,
accounts_db_config,
accounts_update_notifier,
);
let mut bank = Self::default_with_accounts(accounts);
bank.ancestors = Ancestors::from(vec![bank.slot()]);
bank.transaction_debug_keys = debug_keys;
bank.cluster_type = Some(genesis_config.cluster_type);
bank.process_genesis_config(genesis_config);
bank.finish_init(
genesis_config,
additional_builtins,
debug_do_not_add_builtins,
);
// genesis needs stakes for all epochs up to the epoch implied by
// slot = 0 and genesis configuration
{
let stakes = bank.stakes_cache.stakes();
for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) {
bank.epoch_stakes
.insert(epoch, EpochStakes::new(&stakes, epoch));
}
bank.update_stake_history(None);
}
bank.update_clock(None);
bank.update_rent();
bank.update_epoch_schedule();
bank.update_recent_blockhashes();
bank.fill_sysvar_cache();
bank
}
/// Create a new bank that points to an immutable checkpoint of another bank.
pub fn new_from_parent(parent: &Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
Self::_new_from_parent(
parent,
collector_id,
slot,
null_tracer(),
NewBankOptions::default(),
)
}
pub fn new_from_parent_with_options(
parent: &Arc<Bank>,
collector_id: &Pubkey,
slot: Slot,
new_bank_options: NewBankOptions,
) -> Self {
Self::_new_from_parent(parent, collector_id, slot, null_tracer(), new_bank_options)
}
pub fn new_from_parent_with_tracer(
parent: &Arc<Bank>,
collector_id: &Pubkey,
slot: Slot,
reward_calc_tracer: impl Fn(&RewardCalculationEvent) + Send + Sync,
) -> Self {
Self::_new_from_parent(
parent,
collector_id,
slot,
Some(reward_calc_tracer),
NewBankOptions::default(),
)
}
fn _new_from_parent(
parent: &Arc<Bank>,
collector_id: &Pubkey,
slot: Slot,
reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
new_bank_options: NewBankOptions,
) -> Self {
let mut time = Measure::start("bank::new_from_parent");
let NewBankOptions { vote_only_bank } = new_bank_options;
parent.freeze();
assert_ne!(slot, parent.slot());
let epoch_schedule = parent.epoch_schedule;
let epoch = epoch_schedule.get_epoch(slot);
let rc = BankRc {
accounts: Arc::new(Accounts::new_from_parent(
&parent.rc.accounts,
slot,
parent.slot(),
)),
parent: RwLock::new(Some(parent.clone())),
slot,
bank_id_generator: parent.rc.bank_id_generator.clone(),
};
let src = StatusCacheRc {
status_cache: parent.src.status_cache.clone(),
};
let fee_rate_governor =
FeeRateGovernor::new_derived(&parent.fee_rate_governor, parent.signature_count());
let fee_calculator = if parent.feature_set.is_active(&disable_fee_calculator::id()) {
FeeCalculator::default()
} else {
fee_rate_governor.create_fee_calculator()
};
let bank_id = rc.bank_id_generator.fetch_add(1, Relaxed) + 1;
let mut new = Bank {
rc,
src,
slot,
bank_id,
epoch,
blockhash_queue: RwLock::new(parent.blockhash_queue.read().unwrap().clone()),
// TODO: clean this up, so much special-case copying...
hashes_per_tick: parent.hashes_per_tick,
ticks_per_slot: parent.ticks_per_slot,
ns_per_slot: parent.ns_per_slot,
genesis_creation_time: parent.genesis_creation_time,
unused: parent.unused,
slots_per_year: parent.slots_per_year,
epoch_schedule,
collected_rent: AtomicU64::new(0),
rent_collector: parent.rent_collector.clone_with_epoch(epoch),
max_tick_height: (slot + 1) * parent.ticks_per_slot,
block_height: parent.block_height + 1,
fee_calculator,
fee_rate_governor,
capitalization: AtomicU64::new(parent.capitalization()),
vote_only_bank,
inflation: parent.inflation.clone(),
transaction_count: AtomicU64::new(parent.transaction_count()),
transaction_error_count: AtomicU64::new(0),
transaction_entries_count: AtomicU64::new(0),
transactions_per_entry_max: AtomicU64::new(0),
            // we will .clone_with_epoch() this soon after the stake data update, so just .clone() for now
stakes_cache: StakesCache::new(parent.stakes_cache.stakes().clone()),
epoch_stakes: parent.epoch_stakes.clone(),
parent_hash: parent.hash(),
parent_slot: parent.slot(),
collector_id: *collector_id,
collector_fees: AtomicU64::new(0),
ancestors: Ancestors::default(),
hash: RwLock::new(Hash::default()),
is_delta: AtomicBool::new(false),
tick_height: AtomicU64::new(parent.tick_height.load(Relaxed)),
signature_count: AtomicU64::new(0),
builtin_programs: parent.builtin_programs.clone(),
compute_budget: parent.compute_budget,
feature_builtins: parent.feature_builtins.clone(),
hard_forks: parent.hard_forks.clone(),
rewards: RwLock::new(vec![]),
cluster_type: parent.cluster_type,
lazy_rent_collection: AtomicBool::new(parent.lazy_rent_collection.load(Relaxed)),
rewards_pool_pubkeys: parent.rewards_pool_pubkeys.clone(),
cached_executors: RwLock::new(
(*parent.cached_executors.read().unwrap()).clone_with_epoch(epoch),
),
transaction_debug_keys: parent.transaction_debug_keys.clone(),
transaction_log_collector_config: parent.transaction_log_collector_config.clone(),
transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())),
feature_set: parent.feature_set.clone(),
drop_callback: RwLock::new(OptionalDropCallback(
parent
.drop_callback
.read()
.unwrap()
.0
.as_ref()
.map(|drop_callback| drop_callback.clone_box()),
)),
freeze_started: AtomicBool::new(false),
cost_tracker: RwLock::new(CostTracker::default()),
sysvar_cache: RwLock::new(Vec::new()),
accounts_data_len: AtomicU64::new(parent.load_accounts_data_len()),
};
let mut ancestors = Vec::with_capacity(1 + new.parents().len());
ancestors.push(new.slot());
new.parents().iter().for_each(|p| {
ancestors.push(p.slot());
});
new.ancestors = Ancestors::from(ancestors);
        // The following code may touch AccountsDb, so it requires proper ancestors
let parent_epoch = parent.epoch();
if parent_epoch < new.epoch() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
new.apply_feature_activations(false, false);
// Add new entry to stakes.stake_history, set appropriate epoch and
// update vote accounts with warmed up stakes before saving a
// snapshot of stakes in epoch stakes
new.stakes_cache.activate_epoch(epoch, &thread_pool);
// Save a snapshot of stakes for use in consensus and stake weighted networking
let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot);
new.update_epoch_stakes(leader_schedule_epoch);
// After saving a snapshot of stakes, apply stake rewards and commission
new.update_rewards_with_thread_pool(parent_epoch, reward_calc_tracer, &thread_pool);
} else {
// Save a snapshot of stakes for use in consensus and stake weighted networking
let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot);
new.update_epoch_stakes(leader_schedule_epoch);
}
// Update sysvars before processing transactions
new.update_slot_hashes();
new.update_stake_history(Some(parent_epoch));
new.update_clock(Some(parent_epoch));
new.update_fees();
new.fill_sysvar_cache();
time.stop();
datapoint_info!(
"bank-new_from_parent-heights",
("slot_height", slot, i64),
("block_height", new.block_height, i64),
("parent_slot_height", parent.slot(), i64),
("time_us", time.as_us(), i64),
);
new
}
pub fn byte_limit_for_scans(&self) -> Option<usize> {
self.rc
.accounts
.accounts_db
.accounts_index
.scan_results_limit_bytes
}
pub fn proper_ancestors_set(&self) -> HashSet<Slot> {
HashSet::from_iter(self.proper_ancestors())
}
/// Returns all ancestors excluding self.slot.
pub(crate) fn proper_ancestors(&self) -> impl Iterator<Item = Slot> + '_ {
self.ancestors
.keys()
.into_iter()
.filter(move |slot| *slot != self.slot)
}
pub fn set_callback(&self, callback: Option<Box<dyn DropCallback + Send + Sync>>) {
*self.drop_callback.write().unwrap() = OptionalDropCallback(callback);
}
pub fn vote_only_bank(&self) -> bool {
self.vote_only_bank
}
/// Like `new_from_parent` but additionally:
/// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots
/// in the past
/// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots
/// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank
pub fn warp_from_parent(parent: &Arc<Bank>, collector_id: &Pubkey, slot: Slot) -> Self {
let parent_timestamp = parent.clock().unix_timestamp;
let mut new = Bank::new_from_parent(parent, collector_id, slot);
new.apply_feature_activations(true, false);
new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot));
new.tick_height.store(new.max_tick_height(), Relaxed);
let mut clock = new.clock();
clock.epoch_start_timestamp = parent_timestamp;
clock.unix_timestamp = parent_timestamp;
new.update_sysvar_account(&sysvar::clock::id(), |account| {
create_account(
&clock,
new.inherit_specially_retained_account_fields(account),
)
});
new.freeze();
new
}
/// Create a bank from explicit arguments and deserialized fields from snapshot
#[allow(clippy::float_cmp)]
pub(crate) fn new_from_fields(
bank_rc: BankRc,
genesis_config: &GenesisConfig,
fields: BankFieldsToDeserialize,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
debug_do_not_add_builtins: bool,
accounts_data_len: u64,
) -> Self {
fn new<T: Default>() -> T {
T::default()
}
let mut bank = Self {
rc: bank_rc,
src: new(),
blockhash_queue: RwLock::new(fields.blockhash_queue),
ancestors: Ancestors::from(&fields.ancestors),
hash: RwLock::new(fields.hash),
parent_hash: fields.parent_hash,
parent_slot: fields.parent_slot,
hard_forks: Arc::new(RwLock::new(fields.hard_forks)),
transaction_count: AtomicU64::new(fields.transaction_count),
transaction_error_count: new(),
transaction_entries_count: new(),
transactions_per_entry_max: new(),
tick_height: AtomicU64::new(fields.tick_height),
signature_count: AtomicU64::new(fields.signature_count),
capitalization: AtomicU64::new(fields.capitalization),
max_tick_height: fields.max_tick_height,
hashes_per_tick: fields.hashes_per_tick,
ticks_per_slot: fields.ticks_per_slot,
ns_per_slot: fields.ns_per_slot,
genesis_creation_time: fields.genesis_creation_time,
slots_per_year: fields.slots_per_year,
unused: genesis_config.unused,
slot: fields.slot,
bank_id: 0,
epoch: fields.epoch,
block_height: fields.block_height,
collector_id: fields.collector_id,
collector_fees: AtomicU64::new(fields.collector_fees),
fee_calculator: fields.fee_calculator,
fee_rate_governor: fields.fee_rate_governor,
collected_rent: AtomicU64::new(fields.collected_rent),
            // cloning is needed to pick up epoch-gated behavior in rent_collector
rent_collector: fields.rent_collector.clone_with_epoch(fields.epoch),
epoch_schedule: fields.epoch_schedule,
inflation: Arc::new(RwLock::new(fields.inflation)),
stakes_cache: StakesCache::new(fields.stakes),
epoch_stakes: fields.epoch_stakes,
is_delta: AtomicBool::new(fields.is_delta),
builtin_programs: new(),
compute_budget: None,
feature_builtins: new(),
rewards: new(),
cluster_type: Some(genesis_config.cluster_type),
lazy_rent_collection: new(),
rewards_pool_pubkeys: new(),
cached_executors: RwLock::new(CowCachedExecutors::new(Arc::new(RwLock::new(
CachedExecutors::new(MAX_CACHED_EXECUTORS, fields.epoch),
)))),
transaction_debug_keys: debug_keys,
transaction_log_collector_config: new(),
transaction_log_collector: new(),
feature_set: new(),
drop_callback: RwLock::new(OptionalDropCallback(None)),
freeze_started: AtomicBool::new(fields.hash != Hash::default()),
vote_only_bank: false,
cost_tracker: RwLock::new(CostTracker::default()),
sysvar_cache: RwLock::new(Vec::new()),
accounts_data_len: AtomicU64::new(accounts_data_len),
};
bank.finish_init(
genesis_config,
additional_builtins,
debug_do_not_add_builtins,
);
// Sanity assertions between bank snapshot and genesis config
// Consider removing from serializable bank state
// (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing
// from the passed in genesis_config instead (as new()/new_with_paths() already do)
assert_eq!(
bank.hashes_per_tick,
genesis_config.poh_config.hashes_per_tick
);
assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
assert_eq!(
bank.ns_per_slot,
genesis_config.poh_config.target_tick_duration.as_nanos()
* genesis_config.ticks_per_slot as u128
);
assert_eq!(bank.genesis_creation_time, genesis_config.creation_time);
assert_eq!(bank.unused, genesis_config.unused);
assert_eq!(bank.max_tick_height, (bank.slot + 1) * bank.ticks_per_slot);
assert_eq!(
bank.slots_per_year,
years_as_slots(
1.0,
&genesis_config.poh_config.target_tick_duration,
bank.ticks_per_slot,
)
);
assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule);
assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot));
if !bank.feature_set.is_active(&disable_fee_calculator::id()) {
bank.fee_rate_governor.lamports_per_signature =
bank.fee_calculator.lamports_per_signature;
assert_eq!(
bank.fee_rate_governor.create_fee_calculator(),
bank.fee_calculator
);
}
bank
}
/// Return subset of bank fields representing serializable state
pub(crate) fn get_fields_to_serialize<'a>(
&'a self,
ancestors: &'a HashMap<Slot, usize>,
) -> BankFieldsToSerialize<'a> {
BankFieldsToSerialize {
blockhash_queue: &self.blockhash_queue,
ancestors,
hash: *self.hash.read().unwrap(),
parent_hash: self.parent_hash,
parent_slot: self.parent_slot,
hard_forks: &*self.hard_forks,
transaction_count: self.transaction_count.load(Relaxed),
tick_height: self.tick_height.load(Relaxed),
signature_count: self.signature_count.load(Relaxed),
capitalization: self.capitalization.load(Relaxed),
max_tick_height: self.max_tick_height,
hashes_per_tick: self.hashes_per_tick,
ticks_per_slot: self.ticks_per_slot,
ns_per_slot: self.ns_per_slot,
genesis_creation_time: self.genesis_creation_time,
slots_per_year: self.slots_per_year,
unused: self.unused,
slot: self.slot,
epoch: self.epoch,
block_height: self.block_height,
collector_id: self.collector_id,
collector_fees: self.collector_fees.load(Relaxed),
fee_calculator: self.fee_calculator.clone(),
fee_rate_governor: self.fee_rate_governor.clone(),
collected_rent: self.collected_rent.load(Relaxed),
rent_collector: self.rent_collector.clone(),
epoch_schedule: self.epoch_schedule,
inflation: *self.inflation.read().unwrap(),
stakes: &self.stakes_cache,
epoch_stakes: &self.epoch_stakes,
is_delta: self.is_delta.load(Relaxed),
}
}
pub fn collector_id(&self) -> &Pubkey {
&self.collector_id
}
pub fn genesis_creation_time(&self) -> UnixTimestamp {
self.genesis_creation_time
}
pub fn slot(&self) -> Slot {
self.slot
}
pub fn bank_id(&self) -> BankId {
self.bank_id
}
pub fn epoch(&self) -> Epoch {
self.epoch
}
pub fn first_normal_epoch(&self) -> Epoch {
self.epoch_schedule.first_normal_epoch
}
pub fn freeze_lock(&self) -> RwLockReadGuard<Hash> {
self.hash.read().unwrap()
}
pub fn hash(&self) -> Hash {
*self.hash.read().unwrap()
}
pub fn is_frozen(&self) -> bool {
*self.hash.read().unwrap() != Hash::default()
}
pub fn freeze_started(&self) -> bool {
self.freeze_started.load(Relaxed)
}
pub fn status_cache_ancestors(&self) -> Vec<u64> {
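        // Union the status cache's roots with this bank's ancestors that are
        // at or above the oldest root, then return them sorted; these are the
        // slots the status cache can be queried against.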
let mut roots = self.src.status_cache.read().unwrap().roots().clone();
let min = roots.iter().min().cloned().unwrap_or(0);
for ancestor in self.ancestors.keys() {
if ancestor >= min {
roots.insert(ancestor);
}
}
let mut ancestors: Vec<_> = roots.into_iter().collect();
#[allow(clippy::stable_sort_primitive)]
ancestors.sort();
ancestors
}
/// computed unix_timestamp at this slot height
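    /// e.g. with 400ms slots (`ns_per_slot == 400_000_000`) and `slot == 100`,
    /// this adds 40 seconds to `genesis_creation_time` (illustrative numbers).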
pub fn unix_timestamp_from_genesis(&self) -> i64 {
self.genesis_creation_time + ((self.slot as u128 * self.ns_per_slot) / 1_000_000_000) as i64
}
fn update_sysvar_account<F>(&self, pubkey: &Pubkey, updater: F)
where
F: Fn(&Option<AccountSharedData>) -> AccountSharedData,
{
let old_account = if !self.rent_for_sysvars() {
            // This old behavior is being retired in favor of simpler reasoning for everyone.
            // Specifically, get_sysvar_account_with_fixed_root() doesn't work nicely with eager
            // rent collection, which becomes significant for sysvars after the rent_for_sysvars
            // activation. That's because the get_sysvar_account_with_fixed_root() invocations in
            // both update_slot_history() and update_recent_blockhashes() ignore any updates
            // made by eager rent collection in this slot.
            // Also, it turned out that get_sysvar_account_with_fixed_root()'s special
            // (idempotent) behavior isn't needed to begin with: we're fairly certain that
            // new_from_parent() is never called with the same child slot multiple times in
            // production code (except after proper handling of duplicate slot dumping)...
self.get_sysvar_account_with_fixed_root(pubkey)
} else {
self.get_account_with_fixed_root(pubkey)
};
let mut new_account = updater(&old_account);
if self.rent_for_sysvars() {
            // When a new sysvar comes into existence (with RENT_UNADJUSTED_INITIAL_BALANCE lamports),
            // this code ensures that its balance is adjusted to be rent-exempt.
            // Note that all existing sysvar balances must be adjusted immediately (i.e. reset) upon
            // the `rent_for_sysvars` feature activation (ref: reset_all_sysvar_balances).
            //
            // More generally, this code always re-calculates to account for a possible change in
            // sysvar data size, although no such sysvar currently exists.
self.adjust_sysvar_balance_for_rent(&mut new_account);
}
self.store_account_and_update_capitalization(pubkey, &new_account);
// Update the entry in the cache
let mut sysvar_cache = self.sysvar_cache.write().unwrap();
if let Some(position) = sysvar_cache.iter().position(|(id, _data)| id == pubkey) {
sysvar_cache[position].1 = new_account.data().to_vec();
} else {
sysvar_cache.push((*pubkey, new_account.data().to_vec()));
}
}
fn inherit_specially_retained_account_fields(
&self,
old_account: &Option<AccountSharedData>,
) -> InheritableAccountFields {
const RENT_UNADJUSTED_INITIAL_BALANCE: u64 = 1;
(
old_account
.as_ref()
.map(|a| a.lamports())
.unwrap_or(RENT_UNADJUSTED_INITIAL_BALANCE),
if !self.rent_for_sysvars() {
INITIAL_RENT_EPOCH
} else {
// start to inherit rent_epoch updated by rent collection to be consistent with
// other normal accounts
old_account
.as_ref()
.map(|a| a.rent_epoch())
.unwrap_or(INITIAL_RENT_EPOCH)
},
)
}
/// Unused conversion
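    /// Equivalent to `ceil(rooted_slot / unused)` (assuming `unused > 0`);
    /// e.g. for `rooted_slot == 10` and `unused == 4` this returns 3.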
pub fn get_unused_from_slot(rooted_slot: Slot, unused: u64) -> u64 {
(rooted_slot + (unused - 1)) / unused
}
pub fn clock(&self) -> sysvar::clock::Clock {
from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
.unwrap_or_default()
}
fn update_clock(&self, parent_epoch: Option<Epoch>) {
let mut unix_timestamp = self.clock().unix_timestamp;
let warp_timestamp_again = self
.feature_set
.activated_slot(&feature_set::warp_timestamp_again::id());
let epoch_start_timestamp = if warp_timestamp_again == Some(self.slot()) {
None
} else {
let epoch = if let Some(epoch) = parent_epoch {
epoch
} else {
self.epoch()
};
let first_slot_in_epoch = self.epoch_schedule.get_first_slot_in_epoch(epoch);
Some((first_slot_in_epoch, self.clock().epoch_start_timestamp))
};
let max_allowable_drift = if self
.feature_set
.is_active(&feature_set::warp_timestamp_again::id())
{
MaxAllowableDrift {
fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST,
slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW,
}
} else {
MaxAllowableDrift {
fast: MAX_ALLOWABLE_DRIFT_PERCENTAGE,
slow: MAX_ALLOWABLE_DRIFT_PERCENTAGE,
}
};
let ancestor_timestamp = self.clock().unix_timestamp;
if let Some(timestamp_estimate) =
self.get_timestamp_estimate(max_allowable_drift, epoch_start_timestamp)
{
unix_timestamp = timestamp_estimate;
if timestamp_estimate < ancestor_timestamp {
unix_timestamp = ancestor_timestamp;
}
}
datapoint_info!(
"bank-timestamp-correction",
("slot", self.slot(), i64),
("from_genesis", self.unix_timestamp_from_genesis(), i64),
("corrected", unix_timestamp, i64),
("ancestor_timestamp", ancestor_timestamp, i64),
);
let mut epoch_start_timestamp =
// On epoch boundaries, update epoch_start_timestamp
if parent_epoch.is_some() && parent_epoch.unwrap() != self.epoch() {
unix_timestamp
} else {
self.clock().epoch_start_timestamp
};
if self.slot == 0 {
unix_timestamp = self.unix_timestamp_from_genesis();
epoch_start_timestamp = self.unix_timestamp_from_genesis();
}
let clock = sysvar::clock::Clock {
slot: self.slot,
epoch_start_timestamp,
epoch: self.epoch_schedule.get_epoch(self.slot),
leader_schedule_epoch: self.epoch_schedule.get_leader_schedule_epoch(self.slot),
unix_timestamp,
};
self.update_sysvar_account(&sysvar::clock::id(), |account| {
create_account(
&clock,
self.inherit_specially_retained_account_fields(account),
)
});
}
pub fn set_sysvar_for_tests<T>(&self, sysvar: &T)
where
T: Sysvar + SysvarId,
{
self.update_sysvar_account(&T::id(), |account| {
create_account(
sysvar,
self.inherit_specially_retained_account_fields(account),
)
});
}
fn update_slot_history(&self) {
self.update_sysvar_account(&sysvar::slot_history::id(), |account| {
let mut slot_history = account
.as_ref()
.map(|account| from_account::<SlotHistory, _>(account).unwrap())
.unwrap_or_default();
slot_history.add(self.slot());
create_account(
&slot_history,
self.inherit_specially_retained_account_fields(account),
)
});
}
fn update_slot_hashes(&self) {
self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| {
let mut slot_hashes = account
.as_ref()
.map(|account| from_account::<SlotHashes, _>(account).unwrap())
.unwrap_or_default();
slot_hashes.add(self.parent_slot, self.parent_hash);
create_account(
&slot_hashes,
self.inherit_specially_retained_account_fields(account),
)
});
}
fn fill_sysvar_cache(&mut self) {
let mut sysvar_cache = self.sysvar_cache.write().unwrap();
for id in sysvar::ALL_IDS.iter() {
if !sysvar_cache.iter().any(|(key, _data)| key == id) {
if let Some(account) = self.get_account_with_fixed_root(id) {
sysvar_cache.push((*id, account.data().to_vec()));
}
}
}
}
pub fn get_slot_history(&self) -> SlotHistory {
from_account(&self.get_account(&sysvar::slot_history::id()).unwrap()).unwrap()
}
fn update_epoch_stakes(&mut self, leader_schedule_epoch: Epoch) {
// update epoch_stakes cache
// if my parent didn't populate for this staker's epoch, we've
// crossed a boundary
if self.epoch_stakes.get(&leader_schedule_epoch).is_none() {
self.epoch_stakes.retain(|&epoch, _| {
epoch >= leader_schedule_epoch.saturating_sub(MAX_LEADER_SCHEDULE_STAKES)
});
let new_epoch_stakes =
EpochStakes::new(&self.stakes_cache.stakes(), leader_schedule_epoch);
{
let vote_stakes: HashMap<_, _> = self
.stakes_cache
.stakes()
.vote_accounts()
.iter()
.map(|(pubkey, (stake, _))| (*pubkey, *stake))
.collect();
info!(
"new epoch stakes, epoch: {}, stakes: {:#?}, total_stake: {}",
leader_schedule_epoch,
vote_stakes,
new_epoch_stakes.total_stake(),
);
}
self.epoch_stakes
.insert(leader_schedule_epoch, new_epoch_stakes);
}
}
#[allow(deprecated)]
fn update_fees(&self) {
if !self
.feature_set
.is_active(&feature_set::disable_fees_sysvar::id())
{
self.update_sysvar_account(&sysvar::fees::id(), |account| {
create_account(
&sysvar::fees::Fees::new(&self.fee_rate_governor.create_fee_calculator()),
self.inherit_specially_retained_account_fields(account),
)
});
}
}
fn update_rent(&self) {
self.update_sysvar_account(&sysvar::rent::id(), |account| {
create_account(
&self.rent_collector.rent,
self.inherit_specially_retained_account_fields(account),
)
});
}
fn update_epoch_schedule(&self) {
self.update_sysvar_account(&sysvar::epoch_schedule::id(), |account| {
create_account(
&self.epoch_schedule,
self.inherit_specially_retained_account_fields(account),
)
});
}
fn update_stake_history(&self, epoch: Option<Epoch>) {
if epoch == Some(self.epoch()) {
return;
}
// if I'm the first Bank in an epoch, ensure stake_history is updated
self.update_sysvar_account(&sysvar::stake_history::id(), |account| {
create_account::<sysvar::stake_history::StakeHistory>(
self.stakes_cache.stakes().history(),
self.inherit_specially_retained_account_fields(account),
)
});
}
pub fn epoch_duration_in_years(&self, prev_epoch: Epoch) -> f64 {
// period: time that has passed as a fraction of a year, basically the length of
// an epoch as a fraction of a year
// calculated as: slots_elapsed / (slots / year)
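        // e.g. an epoch of 432_000 slots with ~78.9M slots/year (400ms slots)
        // yields roughly 0.0055 years, i.e. about 2 days (illustrative numbers).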
self.epoch_schedule.get_slots_in_epoch(prev_epoch) as f64 / self.slots_per_year
}
// Calculates the starting-slot for inflation from the activation slot.
// This method assumes that `pico_inflation` will be enabled before `full_inflation`, giving
// precedence to the latter. However, since `pico_inflation` is fixed-rate Inflation, should
// `pico_inflation` be enabled 2nd, the incorrect start slot provided here should have no
// effect on the inflation calculation.
fn get_inflation_start_slot(&self) -> Slot {
let mut slots = self
.feature_set
.full_inflation_features_enabled()
.iter()
.filter_map(|id| self.feature_set.activated_slot(id))
.collect::<Vec<_>>();
slots.sort_unstable();
slots.get(0).cloned().unwrap_or_else(|| {
self.feature_set
.activated_slot(&feature_set::pico_inflation::id())
.unwrap_or(0)
})
}
fn get_inflation_num_slots(&self) -> u64 {
let inflation_activation_slot = self.get_inflation_start_slot();
// Normalize inflation_start to align with the start of rewards accrual.
let inflation_start_slot = self.epoch_schedule.get_first_slot_in_epoch(
self.epoch_schedule
.get_epoch(inflation_activation_slot)
.saturating_sub(1),
);
self.epoch_schedule.get_first_slot_in_epoch(self.epoch()) - inflation_start_slot
}
pub fn slot_in_year_for_inflation(&self) -> f64 {
let num_slots = self.get_inflation_num_slots();
// calculated as: num_slots / (slots / year)
num_slots as f64 / self.slots_per_year
}
// update rewards based on the previous epoch
fn update_rewards_with_thread_pool(
&mut self,
prev_epoch: Epoch,
reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
thread_pool: &ThreadPool,
) {
let slot_in_year = self.slot_in_year_for_inflation();
let epoch_duration_in_years = self.epoch_duration_in_years(prev_epoch);
let (validator_rate, foundation_rate) = {
let inflation = self.inflation.read().unwrap();
(
(*inflation).validator(slot_in_year),
(*inflation).foundation(slot_in_year),
)
};
let capitalization = self.capitalization();
let validator_rewards =
(validator_rate * capitalization as f64 * epoch_duration_in_years) as u64;
let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
let validator_point_value = self.pay_validator_rewards_with_thread_pool(
prev_epoch,
validator_rewards,
reward_calc_tracer,
self.stake_program_advance_activating_credits_observed(),
thread_pool,
);
if !self
.feature_set
.is_active(&feature_set::deprecate_rewards_sysvar::id())
{
// this sysvar can be retired once `pico_inflation` is enabled on all clusters
self.update_sysvar_account(&sysvar::rewards::id(), |account| {
create_account(
&sysvar::rewards::Rewards::new(validator_point_value),
self.inherit_specially_retained_account_fields(account),
)
});
}
let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked();
let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked;
assert_eq!(
validator_rewards_paid,
u64::try_from(
self.rewards
.read()
.unwrap()
.iter()
.map(|(_address, reward_info)| {
match reward_info.reward_type {
RewardType::Voting | RewardType::Staking => reward_info.lamports,
_ => 0,
}
})
.sum::<i64>()
)
.unwrap()
);
// verify that we didn't pay any more than we expected to
assert!(validator_rewards >= validator_rewards_paid);
info!(
"distributed inflation: {} (rounded from: {})",
validator_rewards_paid, validator_rewards
);
self.capitalization
.fetch_add(validator_rewards_paid, Relaxed);
let active_stake = if let Some(stake_history_entry) =
self.stakes_cache.stakes().history().get(prev_epoch)
{
stake_history_entry.effective
} else {
0
};
datapoint_warn!(
"epoch_rewards",
("slot", self.slot, i64),
("epoch", prev_epoch, i64),
("validator_rate", validator_rate, f64),
("foundation_rate", foundation_rate, f64),
("epoch_duration_in_years", epoch_duration_in_years, f64),
("validator_rewards", validator_rewards_paid, i64),
("active_stake", active_stake, i64),
("pre_capitalization", capitalization, i64),
("post_capitalization", self.capitalization(), i64)
);
}
    /// Map stake delegations into resolved (pubkey, account) pairs.
    ///
    /// Returns a map, keyed by vote pubkey, of each vote account together with a
    /// Vec of its resolved stake delegations (the map has to be copied by the caller).
    ///
    /// Filters out invalid pairs.
fn load_vote_and_stake_accounts_with_thread_pool(
&self,
thread_pool: &ThreadPool,
reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
) -> LoadVoteAndStakeAccountsResult {
let stakes = self.stakes_cache.stakes();
let vote_with_stake_delegations_map =
DashMap::with_capacity(stakes.vote_accounts().as_ref().len());
let invalid_stake_keys: DashMap<Pubkey, InvalidCacheEntryReason> = DashMap::new();
let invalid_vote_keys: DashMap<Pubkey, InvalidCacheEntryReason> = DashMap::new();
thread_pool.install(|| {
stakes
.stake_delegations()
.par_iter()
.for_each(|(stake_pubkey, delegation)| {
let vote_pubkey = &delegation.voter_pubkey;
if invalid_vote_keys.contains_key(vote_pubkey) {
return;
}
let stake_delegation = match self.get_account_with_fixed_root(stake_pubkey) {
Some(stake_account) => {
if stake_account.owner() != &solana_stake_program::id() {
invalid_stake_keys
.insert(*stake_pubkey, InvalidCacheEntryReason::WrongOwner);
return;
}
match stake_account.state().ok() {
Some(stake_state) => (*stake_pubkey, (stake_state, stake_account)),
None => {
invalid_stake_keys
.insert(*stake_pubkey, InvalidCacheEntryReason::BadState);
return;
}
}
}
None => {
invalid_stake_keys
.insert(*stake_pubkey, InvalidCacheEntryReason::Missing);
return;
}
};
let mut vote_delegations = if let Some(vote_delegations) =
vote_with_stake_delegations_map.get_mut(vote_pubkey)
{
vote_delegations
} else {
let vote_account = match self.get_account_with_fixed_root(vote_pubkey) {
Some(vote_account) => {
if vote_account.owner() != &solana_vote_program::id() {
invalid_vote_keys
.insert(*vote_pubkey, InvalidCacheEntryReason::WrongOwner);
return;
}
vote_account
}
None => {
invalid_vote_keys
.insert(*vote_pubkey, InvalidCacheEntryReason::Missing);
return;
}
};
let vote_state = if let Ok(vote_state) =
StateMut::<VoteStateVersions>::state(&vote_account)
{
vote_state.convert_to_current()
} else {
invalid_vote_keys
.insert(*vote_pubkey, InvalidCacheEntryReason::BadState);
return;
};
vote_with_stake_delegations_map
.entry(*vote_pubkey)
.or_insert_with(|| VoteWithStakeDelegations {
vote_state: Arc::new(vote_state),
vote_account,
delegations: vec![],
})
};
if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() {
reward_calc_tracer(&RewardCalculationEvent::Staking(
stake_pubkey,
&InflationPointCalculationEvent::Delegation(
*delegation,
solana_vote_program::id(),
),
));
}
vote_delegations.delegations.push(stake_delegation);
});
});
LoadVoteAndStakeAccountsResult {
vote_with_stake_delegations_map,
invalid_vote_keys,
invalid_stake_keys,
}
}
    /// Iterate over all stakes, redeem vote credits for each stake we can
    /// successfully load and parse, and return the lamport value of one point
fn pay_validator_rewards_with_thread_pool(
&mut self,
rewarded_epoch: Epoch,
rewards: u64,
reward_calc_tracer: Option<impl Fn(&RewardCalculationEvent) + Send + Sync>,
fix_activating_credits_observed: bool,
thread_pool: &ThreadPool,
) -> f64 {
let stake_history = self.stakes_cache.stakes().history().clone();
let vote_with_stake_delegations_map = {
let LoadVoteAndStakeAccountsResult {
vote_with_stake_delegations_map,
invalid_stake_keys,
invalid_vote_keys,
} = self.load_vote_and_stake_accounts_with_thread_pool(
thread_pool,
reward_calc_tracer.as_ref(),
);
let evict_invalid_stakes_cache_entries = self
.feature_set
.is_active(&feature_set::evict_invalid_stakes_cache_entries::id());
self.stakes_cache.handle_invalid_keys(
invalid_stake_keys,
invalid_vote_keys,
evict_invalid_stakes_cache_entries,
self.slot(),
);
vote_with_stake_delegations_map
};
let points: u128 = thread_pool.install(|| {
vote_with_stake_delegations_map
.par_iter()
.map(|entry| {
let VoteWithStakeDelegations {
vote_state,
delegations,
..
} = entry.value();
delegations
.par_iter()
.map(|(_stake_pubkey, (stake_state, _stake_account))| {
stake_state::calculate_points(
stake_state,
vote_state,
Some(&stake_history),
)
.unwrap_or(0)
})
.sum::<u128>()
})
.sum()
});
if points == 0 {
return 0.0;
}
// pay according to point value
let point_value = PointValue { rewards, points };
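        // Sketch of the point-value math (hypothetical numbers): with
        // rewards = 1_000 lamports and points = 4_000, the value returned at
        // the end of this function is 1_000.0 / 4_000.0 == 0.25 lamports/point.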
let vote_account_rewards: DashMap<Pubkey, (AccountSharedData, u8, u64, bool)> =
DashMap::with_capacity(vote_with_stake_delegations_map.len());
let stake_delegation_iterator = vote_with_stake_delegations_map.into_par_iter().flat_map(
|(
vote_pubkey,
VoteWithStakeDelegations {
vote_state,
vote_account,
delegations,
},
)| {
vote_account_rewards
.insert(vote_pubkey, (vote_account, vote_state.commission, 0, false));
delegations
.into_par_iter()
.map(move |delegation| (vote_pubkey, Arc::clone(&vote_state), delegation))
},
);
let mut stake_rewards = thread_pool.install(|| {
stake_delegation_iterator
.filter_map(
|(
vote_pubkey,
vote_state,
(stake_pubkey, (stake_state, mut stake_account)),
)| {
// curry closure to add the contextual stake_pubkey
let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| {
// inner
move |inner_event: &_| {
outer(&RewardCalculationEvent::Staking(&stake_pubkey, inner_event))
}
});
let redeemed = stake_state::redeem_rewards(
rewarded_epoch,
stake_state,
&mut stake_account,
&vote_state,
&point_value,
Some(&stake_history),
reward_calc_tracer.as_ref(),
fix_activating_credits_observed,
);
if let Ok((stakers_reward, voters_reward)) = redeemed {
// track voter rewards
if let Some((
_vote_account,
_commission,
vote_rewards_sum,
vote_needs_store,
)) = vote_account_rewards.get_mut(&vote_pubkey).as_deref_mut()
{
*vote_needs_store = true;
*vote_rewards_sum = vote_rewards_sum.saturating_add(voters_reward);
}
// store stake account even if stakers_reward is 0
// because credits observed has changed
self.store_account(&stake_pubkey, &stake_account);
if stakers_reward > 0 {
return Some((
stake_pubkey,
RewardInfo {
reward_type: RewardType::Staking,
lamports: stakers_reward as i64,
post_balance: stake_account.lamports(),
commission: Some(vote_state.commission),
},
));
}
} else {
debug!(
"stake_state::redeem_rewards() failed for {}: {:?}",
stake_pubkey, redeemed
);
}
None
},
)
.collect()
});
let mut vote_rewards = vote_account_rewards
.into_iter()
.filter_map(
|(vote_pubkey, (mut vote_account, commission, vote_rewards, vote_needs_store))| {
if let Err(err) = vote_account.checked_add_lamports(vote_rewards) {
debug!("reward redemption failed for {}: {:?}", vote_pubkey, err);
return None;
}
if vote_needs_store {
self.store_account(&vote_pubkey, &vote_account);
}
if vote_rewards > 0 {
Some((
vote_pubkey,
RewardInfo {
reward_type: RewardType::Voting,
lamports: vote_rewards as i64,
post_balance: vote_account.lamports(),
commission: Some(commission),
},
))
} else {
None
}
},
)
.collect();
{
let mut rewards = self.rewards.write().unwrap();
rewards.append(&mut vote_rewards);
rewards.append(&mut stake_rewards);
}
point_value.rewards as f64 / point_value.points as f64
}
fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
#[allow(deprecated)]
self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {
let recent_blockhash_iter = locked_blockhash_queue.get_recent_blockhashes();
recent_blockhashes_account::create_account_with_data_and_fields(
recent_blockhash_iter,
self.inherit_specially_retained_account_fields(account),
)
});
}
pub fn update_recent_blockhashes(&self) {
let blockhash_queue = self.blockhash_queue.read().unwrap();
self.update_recent_blockhashes_locked(&blockhash_queue);
}
fn get_timestamp_estimate(
&self,
max_allowable_drift: MaxAllowableDrift,
epoch_start_timestamp: Option<(Slot, UnixTimestamp)>,
) -> Option<UnixTimestamp> {
let mut get_timestamp_estimate_time = Measure::start("get_timestamp_estimate");
let slots_per_epoch = self.epoch_schedule().slots_per_epoch;
let vote_accounts = self.vote_accounts();
let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| {
let vote_state = account.vote_state();
let vote_state = vote_state.as_ref().ok()?;
let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?;
(slot_delta <= slots_per_epoch).then(|| {
(
*pubkey,
(
vote_state.last_timestamp.slot,
vote_state.last_timestamp.timestamp,
),
)
})
});
let slot_duration = Duration::from_nanos(self.ns_per_slot as u64);
let epoch = self.epoch_schedule().get_epoch(self.slot());
let stakes = self.epoch_vote_accounts(epoch)?;
let stake_weighted_timestamp = calculate_stake_weighted_timestamp(
recent_timestamps,
stakes,
self.slot(),
slot_duration,
epoch_start_timestamp,
max_allowable_drift,
self.feature_set
.is_active(&feature_set::warp_timestamp_again::id()),
);
get_timestamp_estimate_time.stop();
datapoint_info!(
"bank-timestamp",
(
"get_timestamp_estimate_us",
get_timestamp_estimate_time.as_us(),
i64
),
);
stake_weighted_timestamp
}
    // Distribute the transaction fees collected for this slot to collector_id (= the current
    // leader).
    //
    // Each validator is incentivized to process more transactions to earn more transaction
    // fees. Transaction fees compensate for the computing resources a transaction consumes,
    // so they are directly proportional to the leader's actual processing work.
    //
    // collector_id rotates according to the stake-weighted leader schedule, so the opportunity
    // to earn transaction fees is fairly distributed by stake. Missing the opportunity (not
    // producing a block as a leader) earns nothing, so transaction fees also incentivize
    // staying online.
    //
    // Rent fees, on the other hand, are distributed under a slightly different philosophy,
    // while still being stake-weighted.
    // Ref: distribute_rent_to_validators
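    //
    // A worked sketch of the burn split below (the 50% burn rate is an
    // illustrative assumption, not read from this code): with
    // collector_fees = 100 and a 50% burn, `burn()` would return
    // (deposit = 50, burn = 50); the deposit is credited to collector_id and
    // the burned portion is subtracted from capitalization.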
fn collect_fees(&self) {
let collector_fees = self.collector_fees.load(Relaxed) as u64;
if collector_fees != 0 {
let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees);
// burn a portion of fees
debug!(
"distributed fee: {} (rounded from: {}, burned: {})",
deposit, collector_fees, burn
);
match self.deposit(&self.collector_id, deposit) {
Ok(post_balance) => {
if deposit != 0 {
self.rewards.write().unwrap().push((
self.collector_id,
RewardInfo {
reward_type: RewardType::Fee,
lamports: deposit as i64,
post_balance,
commission: None,
},
));
}
}
Err(_) => {
error!(
"Burning {} fee instead of crediting {}",
deposit, self.collector_id
);
inc_new_counter_error!("bank-burned_fee_lamports", deposit as usize);
burn += deposit;
}
}
self.capitalization.fetch_sub(burn, Relaxed);
}
}
pub fn rehash(&self) {
let mut hash = self.hash.write().unwrap();
let new = self.hash_internal_state();
if new != *hash {
warn!("Updating bank hash to {}", new);
*hash = new;
}
}
pub fn freeze(&self) {
// This lock prevents any new commits from BankingStage
// `process_and_record_transactions_locked()` from coming
// in after the last tick is observed. This is because in
// BankingStage, any transaction successfully recorded in
// `record_transactions()` is recorded after this `hash` lock
        // is grabbed. A successful record thus means the PoH has not yet
        // reached the last tick, which in turn means freeze() hasn't been
        // called yet. And because
// BankingStage doesn't release this hash lock until both
// record and commit are finished, those transactions will be
// committed before this write lock can be obtained here.
let mut hash = self.hash.write().unwrap();
if *hash == Hash::default() {
// finish up any deferred changes to account state
self.collect_rent_eagerly();
self.collect_fees();
self.distribute_rent();
self.update_slot_history();
self.run_incinerator();
// freeze is a one-way trip, idempotent
self.freeze_started.store(true, Relaxed);
*hash = self.hash_internal_state();
self.rc.accounts.accounts_db.mark_slot_frozen(self.slot());
}
}
    // Should not be called outside of startup; this will race with
    // concurrent cleaning logic in AccountsBackgroundService
pub fn exhaustively_free_unused_resource(&self, last_full_snapshot_slot: Option<Slot>) {
const IS_STARTUP: bool = true; // this is only called at startup, and we want to use more threads
let mut flush = Measure::start("flush");
// Flush all the rooted accounts. Must be called after `squash()`,
// so that AccountsDb knows what the roots are.
self.force_flush_accounts_cache();
flush.stop();
let mut clean = Measure::start("clean");
// Don't clean the slot we're snapshotting because it may have zero-lamport
// accounts that were included in the bank delta hash when the bank was frozen,
// and if we clean them here, any newly created snapshot's hash for this bank
// may not match the frozen hash.
self.clean_accounts(true, IS_STARTUP, last_full_snapshot_slot);
clean.stop();
let mut shrink = Measure::start("shrink");
self.shrink_all_slots(IS_STARTUP, last_full_snapshot_slot);
shrink.stop();
info!(
"exhaustively_free_unused_resource() {} {} {}",
flush, clean, shrink,
);
}
pub fn epoch_schedule(&self) -> &EpochSchedule {
&self.epoch_schedule
}
/// squash the parent's state up into this Bank,
/// this Bank becomes a root
pub fn squash(&self) -> SquashTiming {
self.freeze();
//this bank and all its parents are now on the rooted path
let mut roots = vec![self.slot()];
roots.append(&mut self.parents().iter().map(|p| p.slot()).collect());
let mut total_index_us = 0;
let mut total_cache_us = 0;
let mut total_store_us = 0;
let mut squash_accounts_time = Measure::start("squash_accounts_time");
for slot in roots.iter().rev() {
// root forks cannot be purged
let add_root_timing = self.rc.accounts.add_root(*slot);
total_index_us += add_root_timing.index_us;
total_cache_us += add_root_timing.cache_us;
total_store_us += add_root_timing.store_us;
}
squash_accounts_time.stop();
*self.rc.parent.write().unwrap() = None;
let mut squash_cache_time = Measure::start("squash_cache_time");
roots
.iter()
.for_each(|slot| self.src.status_cache.write().unwrap().add_root(*slot));
squash_cache_time.stop();
SquashTiming {
squash_accounts_ms: squash_accounts_time.as_ms(),
squash_accounts_index_ms: total_index_us / 1000,
squash_accounts_cache_ms: total_cache_us / 1000,
squash_accounts_store_ms: total_store_us / 1000,
squash_cache_ms: squash_cache_time.as_ms(),
}
}
    /// Return the most recent checkpoint of this bank instance.
pub fn parent(&self) -> Option<Arc<Bank>> {
self.rc.parent.read().unwrap().clone()
}
pub fn parent_slot(&self) -> Slot {
self.parent_slot
}
pub fn parent_hash(&self) -> Hash {
self.parent_hash
}
fn process_genesis_config(&mut self, genesis_config: &GenesisConfig) {
// Bootstrap validator collects fees until `new_from_parent` is called.
self.fee_rate_governor = genesis_config.fee_rate_governor.clone();
self.fee_calculator = self.fee_rate_governor.create_fee_calculator();
for (pubkey, account) in genesis_config.accounts.iter() {
assert!(
self.get_account(pubkey).is_none(),
"{} repeated in genesis config",
pubkey
);
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
self.capitalization.fetch_add(account.lamports(), Relaxed);
}
// updating sysvars (the fees sysvar in this case) now depends on feature activations in
// genesis_config.accounts above
self.update_fees();
for (pubkey, account) in genesis_config.rewards_pools.iter() {
assert!(
self.get_account(pubkey).is_none(),
"{} repeated in genesis config",
pubkey
);
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
}
// highest staked node is the first collector
self.collector_id = self
.stakes_cache
.stakes()
.highest_staked_node()
.unwrap_or_default();
self.blockhash_queue.write().unwrap().genesis_hash(
&genesis_config.hash(),
self.fee_rate_governor.lamports_per_signature,
);
self.hashes_per_tick = genesis_config.hashes_per_tick();
self.ticks_per_slot = genesis_config.ticks_per_slot();
self.ns_per_slot = genesis_config.ns_per_slot();
self.genesis_creation_time = genesis_config.creation_time;
self.unused = genesis_config.unused;
self.max_tick_height = (self.slot + 1) * self.ticks_per_slot;
self.slots_per_year = genesis_config.slots_per_year();
self.epoch_schedule = genesis_config.epoch_schedule;
self.inflation = Arc::new(RwLock::new(genesis_config.inflation));
self.rent_collector = RentCollector::new(
self.epoch,
&self.epoch_schedule,
self.slots_per_year,
&genesis_config.rent,
);
// Add additional builtin programs specified in the genesis config
for (name, program_id) in &genesis_config.native_instruction_processors {
self.add_builtin_account(name, program_id, false);
}
}
fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) {
self.capitalization.fetch_sub(account.lamports(), Relaxed);
        // Both resetting the account balance to 0 and zeroing the account data
        // are needed to really purge the account from AccountsDb and flush the Stakes cache
account.set_lamports(0);
account.data_as_mut_slice().fill(0);
self.store_account(program_id, &account);
}
// NOTE: must hold idempotent for the same set of arguments
/// Add a builtin program account
pub fn add_builtin_account(&self, name: &str, program_id: &Pubkey, must_replace: bool) {
let existing_genuine_program =
self.get_account_with_fixed_root(program_id)
.and_then(|account| {
                    // It's very unlikely that a non-system account is squatting at program_id,
                    // because of the burden of finding a matching pubkey/hash. So when
                    // account.owner is indeed the native loader's, it's safe to assume it's a
                    // genuine program.
if native_loader::check_id(account.owner()) {
Some(account)
} else {
// malicious account is pre-occupying at program_id
self.burn_and_purge_account(program_id, account);
None
}
});
if must_replace {
// updating builtin program
match &existing_genuine_program {
None => panic!(
"There is no account to replace with builtin program ({}, {}).",
name, program_id
),
Some(account) => {
if *name == String::from_utf8_lossy(account.data()) {
// The existing account is well formed
return;
}
}
}
} else {
// introducing builtin program
if existing_genuine_program.is_some() {
// The existing account is sufficient
return;
}
}
assert!(
!self.freeze_started(),
"Can't change frozen bank by adding not-existing new builtin program ({}, {}). \
Maybe, inconsistent program activation is detected on snapshot restore?",
name,
program_id
);
// Add a bogus executable builtin account, which will be loaded and ignored.
let account = native_loader::create_loadable_account_with_fields(
name,
self.inherit_specially_retained_account_fields(&existing_genuine_program),
);
self.store_account_and_update_capitalization(program_id, &account);
}
/// Add a precompiled program account
pub fn add_precompiled_account(&self, program_id: &Pubkey) {
if let Some(account) = self.get_account_with_fixed_root(program_id) {
if account.executable() {
// The account is already executable, that's all we need
return;
} else {
// malicious account is pre-occupying at program_id
self.burn_and_purge_account(program_id, account);
}
};
assert!(
!self.freeze_started(),
"Can't change frozen bank by adding not-existing new precompiled program ({}). \
Maybe, inconsistent program activation is detected on snapshot restore?",
program_id
);
// Add a bogus executable account, which will be loaded and ignored.
let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None);
let account = AccountSharedData::from(Account {
lamports,
owner: solana_sdk::system_program::id(),
data: vec![],
executable: true,
rent_epoch,
});
self.store_account_and_update_capitalization(program_id, &account);
}
pub fn set_rent_burn_percentage(&mut self, burn_percent: u8) {
self.rent_collector.rent.burn_percent = burn_percent;
}
pub fn set_hashes_per_tick(&mut self, hashes_per_tick: Option<u64>) {
self.hashes_per_tick = hashes_per_tick;
}
/// Return the last block hash registered.
pub fn last_blockhash(&self) -> Hash {
self.blockhash_queue.read().unwrap().last_hash()
}
pub fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
let blockhash_queue = self.blockhash_queue.read().unwrap();
let last_hash = blockhash_queue.last_hash();
let last_lamports_per_signature = blockhash_queue
.get_lamports_per_signature(&last_hash)
.unwrap(); // safe so long as the BlockhashQueue is consistent
(last_hash, last_lamports_per_signature)
}
pub fn is_blockhash_valid(&self, hash: &Hash) -> bool {
let blockhash_queue = self.blockhash_queue.read().unwrap();
blockhash_queue.check_hash(hash)
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 {
self.rent_collector.rent.minimum_balance(data_len).max(1)
}
pub fn get_lamports_per_signature(&self) -> u64 {
self.fee_rate_governor.lamports_per_signature
}
pub fn get_lamports_per_signature_for_blockhash(&self, hash: &Hash) -> Option<u64> {
let blockhash_queue = self.blockhash_queue.read().unwrap();
blockhash_queue.get_lamports_per_signature(hash)
}
#[deprecated(since = "1.9.0", note = "Please use `get_fee_for_message` instead")]
pub fn get_fee_rate_governor(&self) -> &FeeRateGovernor {
&self.fee_rate_governor
}
pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option<u64> {
let blockhash_queue = self.blockhash_queue.read().unwrap();
let lamports_per_signature =
blockhash_queue.get_lamports_per_signature(message.recent_blockhash())?;
Some(Self::calculate_fee(message, lamports_per_signature))
}
pub fn get_fee_for_message_with_lamports_per_signature(
message: &SanitizedMessage,
lamports_per_signature: u64,
) -> u64 {
Self::calculate_fee(message, lamports_per_signature)
}
#[deprecated(
since = "1.6.11",
note = "Please use `get_blockhash_last_valid_block_height`"
)]
pub fn get_blockhash_last_valid_slot(&self, blockhash: &Hash) -> Option<Slot> {
let blockhash_queue = self.blockhash_queue.read().unwrap();
// This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
// length is made variable by epoch
blockhash_queue
.get_hash_age(blockhash)
.map(|age| self.slot + blockhash_queue.len() as u64 - age)
}
pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option<Slot> {
let blockhash_queue = self.blockhash_queue.read().unwrap();
// This calculation will need to be updated to consider epoch boundaries if BlockhashQueue
// length is made variable by epoch
blockhash_queue
.get_hash_age(blockhash)
.map(|age| self.block_height + blockhash_queue.len() as u64 - age)
}
pub fn confirmed_last_blockhash(&self) -> Hash {
const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3;
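        // e.g., with 5 parents: index = min(3, 5 - 1) = 3, so the blockhash of
        // parents[3] (the fourth ancestor) is considered confirmed.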
let parents = self.parents();
if parents.is_empty() {
self.last_blockhash()
} else {
let index = NUM_BLOCKHASH_CONFIRMATIONS.min(parents.len() - 1);
parents[index].last_blockhash()
}
}
/// Forget all signatures. Useful for benchmarking.
pub fn clear_signatures(&self) {
self.src.status_cache.write().unwrap().clear();
}
pub fn clear_slot_signatures(&self, slot: Slot) {
self.src
.status_cache
.write()
.unwrap()
.clear_slot_entries(slot);
}
pub fn can_commit(result: &Result<()>) -> bool {
match result {
Ok(_) => true,
Err(TransactionError::InstructionError(_, _)) => true,
Err(_) => false,
}
}
fn update_transaction_statuses(
&self,
sanitized_txs: &[SanitizedTransaction],
res: &[TransactionExecutionResult],
) {
let mut status_cache = self.src.status_cache.write().unwrap();
assert_eq!(sanitized_txs.len(), res.len());
for (tx, (res, _nonce)) in sanitized_txs.iter().zip(res) {
if Self::can_commit(res) {
// Add the message hash to the status cache to ensure that this message
// won't be processed again with a different signature.
status_cache.insert(
tx.message().recent_blockhash(),
tx.message_hash(),
self.slot(),
res.clone(),
);
// Add the transaction signature to the status cache so that transaction status
// can be queried by transaction signature over RPC. In the future, this should
// only be added for API nodes because voting validators don't need to do this.
status_cache.insert(
tx.message().recent_blockhash(),
tx.signature(),
self.slot(),
res.clone(),
);
}
}
}
    /// Tell the bank which Entry IDs exist on the ledger. This function
    /// assumes subsequent calls correspond to later entries, and will boot
    /// the oldest ones once its internal cache is full. Once booted, the
    /// bank will reject transactions using that `hash`.
pub fn register_tick(&self, hash: &Hash) {
assert!(
!self.freeze_started(),
"register_tick() working on a bank that is already frozen or is undergoing freezing!"
);
inc_new_counter_debug!("bank-register_tick-registered", 1);
let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
w_blockhash_queue.register_hash(hash, self.fee_rate_governor.lamports_per_signature);
self.update_recent_blockhashes_locked(&w_blockhash_queue);
}
// ReplayStage will start computing the accounts delta hash when it
// detects the tick height has reached the boundary, so the system
// needs to guarantee all account updates for the slot have been
// committed before this tick height is incremented (like the blockhash
// sysvar above)
self.tick_height.fetch_add(1, Relaxed);
}
pub fn is_complete(&self) -> bool {
self.tick_height() == self.max_tick_height()
}
pub fn is_block_boundary(&self, tick_height: u64) -> bool {
tick_height % self.ticks_per_slot == 0
}
/// Prepare a transaction batch from a list of legacy transactions. Used for tests only.
pub fn prepare_batch_for_tests(&self, txs: Vec<Transaction>) -> TransactionBatch {
let sanitized_txs = txs
.into_iter()
.map(SanitizedTransaction::from_transaction_for_tests)
.collect::<Vec<_>>();
let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter());
TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs))
}
/// Prepare a transaction batch from a list of versioned transactions from
/// an entry. Used for tests only.
pub fn prepare_entry_batch(&self, txs: Vec<VersionedTransaction>) -> Result<TransactionBatch> {
let sanitized_txs = txs
.into_iter()
.map(|tx| {
let message_hash = tx.message.hash();
SanitizedTransaction::try_create(tx, message_hash, None, |_| {
Err(TransactionError::UnsupportedVersion)
})
})
.collect::<Result<Vec<_>>>()?;
let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter());
Ok(TransactionBatch::new(
lock_results,
self,
Cow::Owned(sanitized_txs),
))
}
/// Prepare a locked transaction batch from a list of sanitized transactions.
pub fn prepare_sanitized_batch<'a, 'b>(
&'a self,
txs: &'b [SanitizedTransaction],
) -> TransactionBatch<'a, 'b> {
let lock_results = self.rc.accounts.lock_accounts(txs.iter());
TransactionBatch::new(lock_results, self, Cow::Borrowed(txs))
}
    /// Prepare a locked transaction batch from a list of sanitized transactions and their
    /// cost-limited packing status
pub fn prepare_sanitized_batch_with_results<'a, 'b>(
&'a self,
transactions: &'b [SanitizedTransaction],
transaction_results: impl Iterator<Item = Result<()>>,
) -> TransactionBatch<'a, 'b> {
// this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit
let lock_results = self
.rc
.accounts
.lock_accounts_with_results(transactions.iter(), transaction_results);
TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions))
}
/// Prepare a transaction batch without locking accounts for transaction simulation.
pub(crate) fn prepare_simulation_batch<'a>(
&'a self,
transaction: SanitizedTransaction,
) -> TransactionBatch<'a, '_> {
let mut batch = TransactionBatch::new(vec![Ok(())], self, Cow::Owned(vec![transaction]));
batch.needs_unlock = false;
batch
}
/// Run transactions against a frozen bank without committing the results
pub fn simulate_transaction(
&self,
transaction: SanitizedTransaction,
) -> TransactionSimulationResult {
assert!(self.is_frozen(), "simulation bank must be frozen");
self.simulate_transaction_unchecked(transaction)
}
/// Run transactions against a bank without committing the results; does not check if the bank
/// is frozen, enabling use in single-Bank test frameworks
pub fn simulate_transaction_unchecked(
&self,
transaction: SanitizedTransaction,
) -> TransactionSimulationResult {
let number_of_accounts = transaction.message().account_keys_len();
let batch = self.prepare_simulation_batch(transaction);
let mut timings = ExecuteTimings::default();
let (
loaded_transactions,
executed,
_inner_instructions,
logs,
_retryable_transactions,
_transaction_count,
_signature_count,
) = self.load_and_execute_transactions(
&batch,
// After simulation, transactions will need to be forwarded to the leader
// for processing. During forwarding, the transaction could expire if the
// delay is not accounted for.
MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY,
false,
true,
&mut timings,
);
let result = executed[0].0.clone().map(|_| ());
let logs = logs.get(0).cloned().flatten().unwrap_or_default();
let post_simulation_accounts = loaded_transactions
.into_iter()
.next()
.unwrap()
.0
.ok()
.map(|loaded_transaction| {
loaded_transaction
.accounts
.into_iter()
.take(number_of_accounts)
.collect::<Vec<_>>()
})
.unwrap_or_default();
let units_consumed = timings
.details
.per_program_timings
.iter()
.fold(0, |acc: u64, (_, program_timing)| {
acc.saturating_add(program_timing.accumulated_units)
});
debug!("simulate_transaction: {:?}", timings);
TransactionSimulationResult {
result,
logs,
post_simulation_accounts,
units_consumed,
}
}
pub fn unlock_accounts(&self, batch: &mut TransactionBatch) {
if batch.needs_unlock {
batch.needs_unlock = false;
self.rc
.accounts
.unlock_accounts(batch.sanitized_transactions().iter(), batch.lock_results())
}
}
pub fn remove_unrooted_slots(&self, slots: &[(Slot, BankId)]) {
self.rc.accounts.accounts_db.remove_unrooted_slots(slots)
}
pub fn set_shrink_paths(&self, paths: Vec<PathBuf>) {
self.rc.accounts.accounts_db.set_shrink_paths(paths);
}
fn check_age<'a>(
&self,
txs: impl Iterator<Item = &'a SanitizedTransaction>,
lock_results: &[Result<()>],
max_age: usize,
error_counters: &mut ErrorCounters,
) -> Vec<TransactionCheckResult> {
let hash_queue = self.blockhash_queue.read().unwrap();
txs.zip(lock_results)
.map(|(tx, lock_res)| match lock_res {
Ok(()) => {
let recent_blockhash = tx.message().recent_blockhash();
let hash_age = hash_queue.check_hash_age(recent_blockhash, max_age);
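                    // `check_hash_age` is a tri-state: Some(true) means the
                    // blockhash is recent enough, Some(false) means it is known
                    // but too old, and None means it isn't in the queue at all.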
if hash_age == Some(true) {
(Ok(()), None)
} else if let Some((address, account)) = self.check_transaction_for_nonce(tx) {
(Ok(()), Some(NoncePartial::new(address, account)))
} else if hash_age == Some(false) {
error_counters.blockhash_too_old += 1;
(Err(TransactionError::BlockhashNotFound), None)
} else {
error_counters.blockhash_not_found += 1;
(Err(TransactionError::BlockhashNotFound), None)
}
}
Err(e) => (Err(e.clone()), None),
})
.collect()
}
fn is_transaction_already_processed(
&self,
sanitized_tx: &SanitizedTransaction,
status_cache: &StatusCache<Result<()>>,
) -> bool {
let key = sanitized_tx.message_hash();
let transaction_blockhash = sanitized_tx.message().recent_blockhash();
status_cache
.get_status(key, transaction_blockhash, &self.ancestors)
.is_some()
}
fn check_status_cache(
&self,
sanitized_txs: &[SanitizedTransaction],
lock_results: Vec<TransactionCheckResult>,
error_counters: &mut ErrorCounters,
) -> Vec<TransactionCheckResult> {
let rcache = self.src.status_cache.read().unwrap();
sanitized_txs
.iter()
.zip(lock_results)
.map(|(sanitized_tx, (lock_result, nonce))| {
if lock_result.is_ok()
&& self.is_transaction_already_processed(sanitized_tx, &rcache)
{
error_counters.already_processed += 1;
return (Err(TransactionError::AlreadyProcessed), None);
}
(lock_result, nonce)
})
.collect()
}
pub fn check_hash_age(&self, hash: &Hash, max_age: usize) -> Option<bool> {
self.blockhash_queue
.read()
.unwrap()
.check_hash_age(hash, max_age)
}
pub fn check_transaction_for_nonce(
&self,
tx: &SanitizedTransaction,
) -> Option<TransactionAccount> {
tx.get_durable_nonce(self.feature_set.is_active(&nonce_must_be_writable::id()))
.and_then(|nonce_address| {
self.get_account_with_fixed_root(nonce_address)
.map(|nonce_account| (*nonce_address, nonce_account))
})
.filter(|(_, nonce_account)| {
nonce_account::verify_nonce_account(nonce_account, tx.message().recent_blockhash())
})
}
pub fn check_transactions(
&self,
sanitized_txs: &[SanitizedTransaction],
lock_results: &[Result<()>],
max_age: usize,
error_counters: &mut ErrorCounters,
) -> Vec<TransactionCheckResult> {
let age_results =
self.check_age(sanitized_txs.iter(), lock_results, max_age, error_counters);
self.check_status_cache(sanitized_txs, age_results, error_counters)
}
pub fn collect_balances(&self, batch: &TransactionBatch) -> TransactionBalances {
let mut balances: TransactionBalances = vec![];
for transaction in batch.sanitized_transactions() {
let mut transaction_balances: Vec<u64> = vec![];
for account_key in transaction.message().account_keys_iter() {
transaction_balances.push(self.get_balance(account_key));
}
balances.push(transaction_balances);
}
balances
}
#[allow(clippy::cognitive_complexity)]
fn update_error_counters(error_counters: &ErrorCounters) {
if 0 != error_counters.total {
inc_new_counter_info!(
"bank-process_transactions-error_count",
error_counters.total
);
}
if 0 != error_counters.account_not_found {
inc_new_counter_info!(
"bank-process_transactions-account_not_found",
error_counters.account_not_found
);
}
if 0 != error_counters.account_in_use {
inc_new_counter_info!(
"bank-process_transactions-account_in_use",
error_counters.account_in_use
);
}
if 0 != error_counters.account_loaded_twice {
inc_new_counter_info!(
"bank-process_transactions-account_loaded_twice",
error_counters.account_loaded_twice
);
}
if 0 != error_counters.blockhash_not_found {
inc_new_counter_info!(
"bank-process_transactions-error-blockhash_not_found",
error_counters.blockhash_not_found
);
}
if 0 != error_counters.blockhash_too_old {
inc_new_counter_info!(
"bank-process_transactions-error-blockhash_too_old",
error_counters.blockhash_too_old
);
}
if 0 != error_counters.invalid_account_index {
inc_new_counter_info!(
"bank-process_transactions-error-invalid_account_index",
error_counters.invalid_account_index
);
}
if 0 != error_counters.invalid_account_for_fee {
inc_new_counter_info!(
"bank-process_transactions-error-invalid_account_for_fee",
error_counters.invalid_account_for_fee
);
}
if 0 != error_counters.insufficient_funds {
inc_new_counter_info!(
"bank-process_transactions-error-insufficient_funds",
error_counters.insufficient_funds
);
}
if 0 != error_counters.instruction_error {
inc_new_counter_info!(
"bank-process_transactions-error-instruction_error",
error_counters.instruction_error
);
}
if 0 != error_counters.already_processed {
inc_new_counter_info!(
"bank-process_transactions-error-already_processed",
error_counters.already_processed
);
}
if 0 != error_counters.not_allowed_during_cluster_maintenance {
inc_new_counter_info!(
"bank-process_transactions-error-cluster-maintenance",
error_counters.not_allowed_during_cluster_maintenance
);
}
if 0 != error_counters.invalid_writable_account {
inc_new_counter_info!(
"bank-process_transactions-error-invalid_writable_account",
error_counters.invalid_writable_account
);
}
}
/// Get any cached executors needed by the transaction
fn get_executors(
&self,
message: &SanitizedMessage,
accounts: &[TransactionAccount],
program_indices: &[Vec<usize>],
) -> Rc<RefCell<Executors>> {
let mut num_executors = message.account_keys_len();
for program_indices_of_instruction in program_indices.iter() {
num_executors += program_indices_of_instruction.len();
}
let mut executors = HashMap::with_capacity(num_executors);
let cow_cache = self.cached_executors.read().unwrap();
let cache = cow_cache.read().unwrap();
for key in message.account_keys_iter() {
if let Some(executor) = cache.get(key) {
executors.insert(*key, executor);
}
}
for program_indices_of_instruction in program_indices.iter() {
for account_index in program_indices_of_instruction.iter() {
let key = accounts[*account_index].0;
if let Some(executor) = cache.get(&key) {
executors.insert(key, executor);
}
}
}
Rc::new(RefCell::new(Executors {
executors,
is_dirty: false,
}))
}
/// Add executors back to the bank's cache if modified
fn update_executors(&self, executors: Rc<RefCell<Executors>>) {
let executors = executors.borrow();
if executors.is_dirty {
let mut cow_cache = self.cached_executors.write().unwrap();
let mut cache = cow_cache.write().unwrap();
for (key, executor) in executors.executors.iter() {
cache.put(key, (*executor).clone());
}
}
}
/// Remove an executor from the bank's cache
pub fn remove_executor(&self, pubkey: &Pubkey) {
let mut cow_cache = self.cached_executors.write().unwrap();
let mut cache = cow_cache.write().unwrap();
cache.remove(pubkey);
}
#[allow(clippy::type_complexity)]
pub fn load_and_execute_transactions(
&self,
batch: &TransactionBatch,
max_age: usize,
enable_cpi_recording: bool,
enable_log_recording: bool,
timings: &mut ExecuteTimings,
) -> (
Vec<TransactionLoadResult>,
Vec<TransactionExecutionResult>,
Vec<Option<InnerInstructionsList>>,
Vec<Option<TransactionLogMessages>>,
Vec<usize>,
u64,
u64,
) {
let sanitized_txs = batch.sanitized_transactions();
debug!("processing transactions: {}", sanitized_txs.len());
inc_new_counter_info!("bank-process_transactions", sanitized_txs.len());
let mut error_counters = ErrorCounters::default();
let retryable_txs: Vec<_> = batch
.lock_results()
.iter()
.enumerate()
.filter_map(|(index, res)| match res {
Err(TransactionError::AccountInUse) => {
error_counters.account_in_use += 1;
Some(index)
}
Err(TransactionError::WouldExceedMaxBlockCostLimit)
| Err(TransactionError::WouldExceedMaxAccountCostLimit)
| Err(TransactionError::WouldExceedMaxAccountDataCostLimit) => Some(index),
Err(_) => None,
Ok(_) => None,
})
.collect();
let mut check_time = Measure::start("check_transactions");
let check_results = self.check_transactions(
sanitized_txs,
batch.lock_results(),
max_age,
&mut error_counters,
);
check_time.stop();
let mut load_time = Measure::start("accounts_load");
let mut loaded_txs = self.rc.accounts.load_accounts(
&self.ancestors,
sanitized_txs,
check_results,
&self.blockhash_queue.read().unwrap(),
&mut error_counters,
&self.rent_collector,
&self.feature_set,
);
load_time.stop();
let mut execution_time = Measure::start("execution_time");
let mut signature_count: u64 = 0;
let mut inner_instructions: Vec<Option<InnerInstructionsList>> =
Vec::with_capacity(sanitized_txs.len());
let mut transaction_log_messages: Vec<Option<Vec<String>>> =
Vec::with_capacity(sanitized_txs.len());
let executed: Vec<TransactionExecutionResult> = loaded_txs
.iter_mut()
.zip(sanitized_txs.iter())
.map(|(accs, tx)| match accs {
(Err(e), _nonce) => {
transaction_log_messages.push(None);
inner_instructions.push(None);
(Err(e.clone()), None)
}
(Ok(loaded_transaction), nonce) => {
let feature_set = self.feature_set.clone();
signature_count += u64::from(tx.message().header().num_required_signatures);
let mut compute_budget = self.compute_budget.unwrap_or_else(ComputeBudget::new);
let mut process_result = if feature_set.is_active(&tx_wide_compute_cap::id()) {
compute_budget.process_transaction(tx, feature_set.clone())
} else {
Ok(())
};
if process_result.is_ok() {
let executors = self.get_executors(
tx.message(),
&loaded_transaction.accounts,
&loaded_transaction.program_indices,
);
let mut transaction_accounts = Vec::new();
std::mem::swap(&mut loaded_transaction.accounts, &mut transaction_accounts);
let transaction_context = TransactionContext::new(
transaction_accounts,
compute_budget.max_invoke_depth,
);
let instruction_recorder = if enable_cpi_recording {
Some(InstructionRecorder::new_ref(
tx.message().instructions().len(),
))
} else {
None
};
let log_collector = if enable_log_recording {
Some(LogCollector::new_ref())
} else {
None
};
let (blockhash, lamports_per_signature) =
self.last_blockhash_and_lamports_per_signature();
if let Some(legacy_message) = tx.message().legacy_message() {
process_result = MessageProcessor::process_message(
&self.builtin_programs.vec,
legacy_message,
&loaded_transaction.program_indices,
&transaction_context,
self.rent_collector.rent,
log_collector.clone(),
executors.clone(),
instruction_recorder.clone(),
feature_set,
compute_budget,
&mut timings.details,
&*self.sysvar_cache.read().unwrap(),
blockhash,
lamports_per_signature,
self.load_accounts_data_len(),
)
.map(|process_result| {
self.store_accounts_data_len(process_result.accounts_data_len)
});
} else {
// TODO: support versioned messages
process_result = Err(TransactionError::UnsupportedVersion);
}
let log_messages: Option<TransactionLogMessages> =
log_collector.and_then(|log_collector| {
Rc::try_unwrap(log_collector)
.map(|log_collector| log_collector.into_inner().into())
.ok()
});
transaction_log_messages.push(log_messages);
inner_instructions.push(
instruction_recorder
.and_then(|instruction_recorder| {
Rc::try_unwrap(instruction_recorder).ok()
})
.map(|instruction_recorder| {
instruction_recorder.into_inner().deconstruct()
}),
);
loaded_transaction.accounts = transaction_context.deconstruct();
if process_result.is_ok() {
self.update_executors(executors);
}
} else {
transaction_log_messages.push(None);
inner_instructions.push(None);
}
let nonce = match &process_result {
Ok(_) => nonce.clone(), // May need to calculate the fee based on the nonce
Err(TransactionError::InstructionError(_, _)) => {
error_counters.instruction_error += 1;
nonce.clone() // May need to advance the nonce
}
_ => None,
};
(process_result, nonce)
}
})
.collect();
execution_time.stop();
debug!(
"check: {}us load: {}us execute: {}us txs_len={}",
check_time.as_us(),
load_time.as_us(),
execution_time.as_us(),
sanitized_txs.len(),
);
timings.check_us = timings.check_us.saturating_add(check_time.as_us());
timings.load_us = timings.load_us.saturating_add(load_time.as_us());
timings.execute_us = timings.execute_us.saturating_add(execution_time.as_us());
let mut tx_count: u64 = 0;
let err_count = &mut error_counters.total;
let transaction_log_collector_config =
self.transaction_log_collector_config.read().unwrap();
for (i, ((r, _nonce), tx)) in executed.iter().zip(sanitized_txs).enumerate() {
if let Some(debug_keys) = &self.transaction_debug_keys {
for key in tx.message().account_keys_iter() {
if debug_keys.contains(key) {
info!("slot: {} result: {:?} tx: {:?}", self.slot, r, tx);
break;
}
}
}
if Self::can_commit(r) // Skip log collection for unprocessed transactions
&& transaction_log_collector_config.filter != TransactionLogCollectorFilter::None
{
let mut filtered_mentioned_addresses = Vec::new();
if !transaction_log_collector_config
.mentioned_addresses
.is_empty()
{
for key in tx.message().account_keys_iter() {
if transaction_log_collector_config
.mentioned_addresses
.contains(key)
{
filtered_mentioned_addresses.push(*key);
}
}
}
let is_vote = is_simple_vote_transaction(tx);
let store = match transaction_log_collector_config.filter {
TransactionLogCollectorFilter::All => {
!is_vote || !filtered_mentioned_addresses.is_empty()
}
TransactionLogCollectorFilter::AllWithVotes => true,
TransactionLogCollectorFilter::None => false,
TransactionLogCollectorFilter::OnlyMentionedAddresses => {
!filtered_mentioned_addresses.is_empty()
}
};
if store {
if let Some(log_messages) = transaction_log_messages.get(i).cloned().flatten() {
let mut transaction_log_collector =
self.transaction_log_collector.write().unwrap();
let transaction_log_index = transaction_log_collector.logs.len();
transaction_log_collector.logs.push(TransactionLogInfo {
signature: *tx.signature(),
result: r.clone(),
is_vote,
log_messages,
});
for key in filtered_mentioned_addresses.into_iter() {
transaction_log_collector
.mentioned_address_map
.entry(key)
.or_default()
.push(transaction_log_index);
}
}
}
}
if r.is_ok() {
tx_count += 1;
} else {
if *err_count == 0 {
debug!("tx error: {:?} {:?}", r, tx);
}
*err_count += 1;
}
}
if *err_count > 0 {
debug!(
"{} errors of {} txs",
*err_count,
*err_count as u64 + tx_count
);
}
Self::update_error_counters(&error_counters);
(
loaded_txs,
executed,
inner_instructions,
transaction_log_messages,
retryable_txs,
tx_count,
signature_count,
)
}
/// Load the accounts data len
fn load_accounts_data_len(&self) -> u64 {
self.accounts_data_len.load(Acquire)
}
/// Store a new value to the accounts data len
fn store_accounts_data_len(&self, accounts_data_len: u64) {
self.accounts_data_len.store(accounts_data_len, Release)
}
/// Calculate fee for `SanitizedMessage`
pub fn calculate_fee(message: &SanitizedMessage, lamports_per_signature: u64) -> u64 {
let mut num_signatures = u64::from(message.header().num_required_signatures);
for (program_id, instruction) in message.program_instructions_iter() {
if secp256k1_program::check_id(program_id) || ed25519_program::check_id(program_id) {
if let Some(num_verifies) = instruction.data.get(0) {
num_signatures = num_signatures.saturating_add(u64::from(*num_verifies));
}
}
}
lamports_per_signature.saturating_mul(num_signatures)
}
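    // A minimal usage sketch for `calculate_fee` (the values are hypothetical;
    // a real `SanitizedMessage` is built from a signed transaction):
    //
    //     // message requiring 1 signature, no secp256k1/ed25519 instructions,
    //     // at 5_000 lamports per signature:
    //     // Bank::calculate_fee(&message, 5_000) == 5_000
    //
    // Each secp256k1/ed25519 precompile instruction adds its first data byte
    // (the number of verifies) to the signature count before the multiply.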
fn filter_program_errors_and_collect_fee(
&self,
txs: &[SanitizedTransaction],
execution_results: &[TransactionExecutionResult],
) -> Vec<Result<()>> {
let hash_queue = self.blockhash_queue.read().unwrap();
let mut fees = 0;
let results = txs
.iter()
.zip(execution_results)
.map(|(tx, (execution_result, nonce))| {
let (lamports_per_signature, is_nonce) = nonce
.as_ref()
.map(|nonce| nonce.lamports_per_signature())
.map(|maybe_lamports_per_signature| (maybe_lamports_per_signature, true))
.unwrap_or_else(|| {
(
hash_queue.get_lamports_per_signature(tx.message().recent_blockhash()),
false,
)
});
let lamports_per_signature =
lamports_per_signature.ok_or(TransactionError::BlockhashNotFound)?;
let fee = Self::calculate_fee(tx.message(), lamports_per_signature);
match *execution_result {
Err(TransactionError::InstructionError(_, _)) => {
                        // In case of instruction error, even though no accounts
                        // were stored, we still need to charge the payer the
                        // fee...
                        //
                        // ...except for nonce accounts, which already have their
                        // post-load, fee-deducted, pre-execution account state
                        // stored
if !is_nonce {
self.withdraw(tx.message().fee_payer(), fee)?;
}
fees += fee;
Ok(())
}
Ok(()) => {
fees += fee;
Ok(())
}
_ => execution_result.clone(),
}
})
.collect();
self.collector_fees.fetch_add(fees, Relaxed);
results
}
pub fn commit_transactions(
&self,
sanitized_txs: &[SanitizedTransaction],
loaded_txs: &mut [TransactionLoadResult],
executed_results: &[TransactionExecutionResult],
tx_count: u64,
signature_count: u64,
timings: &mut ExecuteTimings,
) -> TransactionResults {
assert!(
!self.freeze_started(),
"commit_transactions() working on a bank that is already frozen or is undergoing freezing!"
);
self.increment_transaction_count(tx_count);
self.increment_signature_count(signature_count);
inc_new_counter_info!("bank-process_transactions-txs", tx_count as usize);
inc_new_counter_info!("bank-process_transactions-sigs", signature_count as usize);
if !sanitized_txs.is_empty() {
let processed_tx_count = sanitized_txs.len() as u64;
let failed_tx_count = processed_tx_count.saturating_sub(tx_count);
self.transaction_error_count
.fetch_add(failed_tx_count, Relaxed);
self.transaction_entries_count.fetch_add(1, Relaxed);
self.transactions_per_entry_max
.fetch_max(processed_tx_count, Relaxed);
}
if executed_results
.iter()
.any(|(res, _)| Self::can_commit(res))
{
self.is_delta.store(true, Relaxed);
}
let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature();
let mut write_time = Measure::start("write_time");
self.rc.accounts.store_cached(
self.slot(),
sanitized_txs,
executed_results,
loaded_txs,
&self.rent_collector,
&blockhash,
lamports_per_signature,
self.rent_for_sysvars(),
self.leave_nonce_on_success(),
);
let rent_debits = self.collect_rent(executed_results, loaded_txs);
let mut update_stakes_cache_time = Measure::start("update_stakes_cache_time");
self.update_stakes_cache(sanitized_txs, executed_results, loaded_txs);
update_stakes_cache_time.stop();
// once committed there is no way to unroll
write_time.stop();
debug!(
"store: {}us txs_len={}",
write_time.as_us(),
sanitized_txs.len()
);
timings.store_us = timings.store_us.saturating_add(write_time.as_us());
timings.update_stakes_cache_us = timings
.update_stakes_cache_us
.saturating_add(update_stakes_cache_time.as_us());
self.update_transaction_statuses(sanitized_txs, executed_results);
let fee_collection_results =
self.filter_program_errors_and_collect_fee(sanitized_txs, executed_results);
TransactionResults {
fee_collection_results,
execution_results: executed_results.to_vec(),
rent_debits,
}
}
    // Distribute the rent fees collected for this slot to staked validators (excluding stakers)
    // according to stake.
    //
    // Rent is the cost of doing business: every validator has to hold (or have access to) the
    // same list of accounts, so we pay according to stake, which is a rough proxy for value to
    // the network.
    //
    // Currently, rent distribution doesn't consider a given validator's uptime at all (this
    // might change). That's because rent compensates for the storage resources consumed; it's
    // treated differently from transaction fees, which compensate for computing resources.
    //
    // We can't simply use collector_id (which rotates according to the stake-weighted leader
    // schedule) as an approximation of the ideal rent distribution, even though doing so would
    // avoid this per-slot computation (time: N log N, space: N account stores; N = # of
    // validators). The reason is that, unlike transaction fees, rent fees don't need to
    // incentivize throughput.
    //
    // Ref: collect_fees
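    //
    // A worked sketch of the split below (illustrative numbers): with
    // rent_to_be_distributed = 10 and two validators staked 2 and 1
    // (total_staked = 3), the initial integer shares are 10*2/3 = 6 and
    // 10*1/3 = 3; the leftover lamport (10 - 9) goes to the highest-stake
    // holder first, for final payouts of 7 and 3.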
#[allow(clippy::needless_collect)]
fn distribute_rent_to_validators(
&self,
vote_accounts: &HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>,
rent_to_be_distributed: u64,
) {
let mut total_staked = 0;
// Collect the stake associated with each validator.
// Note that a validator may be present in this vector multiple times if it happens to have
// more than one staked vote account somehow
let mut validator_stakes = vote_accounts
.iter()
.filter_map(|(_vote_pubkey, (staked, account))| {
if *staked == 0 {
None
} else {
total_staked += *staked;
let node_pubkey = account.vote_state().as_ref().ok()?.node_pubkey;
Some((node_pubkey, *staked))
}
})
.collect::<Vec<(Pubkey, u64)>>();
#[cfg(test)]
if validator_stakes.is_empty() {
            // some tests call bank.freeze() with bad staking state
self.capitalization
.fetch_sub(rent_to_be_distributed, Relaxed);
return;
}
#[cfg(not(test))]
assert!(!validator_stakes.is_empty());
// Sort first by stake and then by validator identity pubkey for determinism
validator_stakes.sort_by(|(pubkey1, staked1), (pubkey2, staked2)| {
match staked2.cmp(staked1) {
std::cmp::Ordering::Equal => pubkey2.cmp(pubkey1),
other => other,
}
});
let enforce_fix = self.no_overflow_rent_distribution_enabled();
let mut rent_distributed_in_initial_round = 0;
let validator_rent_shares = validator_stakes
.into_iter()
.map(|(pubkey, staked)| {
let rent_share = if !enforce_fix {
(((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64
} else {
(((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128))
.try_into()
.unwrap()
};
rent_distributed_in_initial_round += rent_share;
(pubkey, rent_share)
})
.collect::<Vec<(Pubkey, u64)>>();
        // Leftover lamports after the fractional calculation will be paid to validators,
        // starting from the highest stake holder
let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round;
let mut rewards = vec![];
validator_rent_shares
.into_iter()
.for_each(|(pubkey, rent_share)| {
let rent_to_be_paid = if leftover_lamports > 0 {
leftover_lamports -= 1;
rent_share + 1
} else {
rent_share
};
if !enforce_fix || rent_to_be_paid > 0 {
let mut account = self
.get_account_with_fixed_root(&pubkey)
.unwrap_or_default();
if account.checked_add_lamports(rent_to_be_paid).is_err() {
// overflow adding lamports
self.capitalization.fetch_sub(rent_to_be_paid, Relaxed);
error!(
"Burned {} rent lamports instead of sending to {}",
rent_to_be_paid, pubkey
);
inc_new_counter_error!(
"bank-burned_rent_lamports",
rent_to_be_paid as usize
);
} else {
self.store_account(&pubkey, &account);
rewards.push((
pubkey,
RewardInfo {
reward_type: RewardType::Rent,
lamports: rent_to_be_paid as i64,
post_balance: account.lamports(),
commission: None,
},
));
}
}
});
self.rewards.write().unwrap().append(&mut rewards);
if enforce_fix {
assert_eq!(leftover_lamports, 0);
} else if leftover_lamports != 0 {
warn!(
"There was leftover from rent distribution: {}",
leftover_lamports
);
self.capitalization.fetch_sub(leftover_lamports, Relaxed);
}
}
fn distribute_rent(&self) {
let total_rent_collected = self.collected_rent.load(Relaxed);
let (burned_portion, rent_to_be_distributed) = self
.rent_collector
.rent
.calculate_burn(total_rent_collected);
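        // Sketch (assuming an illustrative 50% rent burn, not read from this
        // code): total_rent_collected = 100 would split into
        // burned_portion = 50 and rent_to_be_distributed = 50.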
debug!(
"distributed rent: {} (rounded from: {}, burned: {})",
rent_to_be_distributed, total_rent_collected, burned_portion
);
self.capitalization.fetch_sub(burned_portion, Relaxed);
if rent_to_be_distributed == 0 {
return;
}
self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed);
}
fn collect_rent(
&self,
res: &[TransactionExecutionResult],
loaded_txs: &mut [TransactionLoadResult],
) -> Vec<RentDebits> {
let mut collected_rent: u64 = 0;
let mut rent_debits: Vec<RentDebits> = Vec::with_capacity(loaded_txs.len());
for (i, (raccs, _nonce)) in loaded_txs.iter_mut().enumerate() {
let (res, _nonce) = &res[i];
if res.is_err() || raccs.is_err() {
rent_debits.push(RentDebits::default());
continue;
}
let loaded_transaction = raccs.as_mut().unwrap();
collected_rent += loaded_transaction.rent;
rent_debits.push(mem::take(&mut loaded_transaction.rent_debits));
}
self.collected_rent.fetch_add(collected_rent, Relaxed);
rent_debits
}
fn run_incinerator(&self) {
if let Some((account, _)) =
self.get_account_modified_since_parent_with_fixed_root(&incinerator::id())
{
self.capitalization.fetch_sub(account.lamports(), Relaxed);
self.store_account(&incinerator::id(), &AccountSharedData::default());
}
}
fn collect_rent_eagerly(&self) {
if !self.enable_eager_rent_collection() {
return;
}
let mut measure = Measure::start("collect_rent_eagerly-ms");
let partitions = self.rent_collection_partitions();
let count = partitions.len();
let account_count: usize = partitions
.into_iter()
.map(|partition| self.collect_rent_in_partition(partition))
.sum();
measure.stop();
datapoint_info!(
"collect_rent_eagerly",
("accounts", account_count, i64),
("partitions", count, i64)
);
inc_new_counter_info!("collect_rent_eagerly-ms", measure.as_ms() as usize);
}
#[cfg(test)]
fn restore_old_behavior_for_fragile_tests(&self) {
self.lazy_rent_collection.store(true, Relaxed);
}
fn enable_eager_rent_collection(&self) -> bool {
if self.lazy_rent_collection.load(Relaxed) {
return false;
}
true
}
fn rent_collection_partitions(&self) -> Vec<Partition> {
if !self.use_fixed_collection_cycle() {
// This mode is for production/development/testing.
            // In this mode, we iterate over the whole pubkey value range for each epoch,
            // including warm-up epochs.
            // The only exception is when normal epochs are relatively short (currently
            // less than 2 days). In that case, we arrange a single collection cycle to
            // span multiple epochs so that a cycle is longer than the 2 days.
self.variable_cycle_partitions()
} else {
            // This mode is mainly for benchmarking.
            // In this mode, we always iterate over the whole pubkey value range with
            // <slot_count_in_two_day> slots as a collection cycle, regardless of warm-up
            // or alignment between collection cycles and epochs.
            // Thus, we can simulate a stable processing load for eager rent collection,
            // strictly proportional to the number of pubkeys since genesis.
self.fixed_cycle_partitions()
}
}
fn collect_rent_in_partition(&self, partition: Partition) -> usize {
let subrange = Self::pubkey_range_from_partition(partition);
let thread_pool = &self.rc.accounts.accounts_db.thread_pool;
self.rc
.accounts
.hold_range_in_memory(&subrange, true, thread_pool);
let accounts = self
.rc
.accounts
.load_to_collect_rent_eagerly(&self.ancestors, subrange.clone());
let account_count = accounts.len();
// parallelize?
let rent_for_sysvars = self.rent_for_sysvars();
let mut total_rent = 0;
let mut rent_debits = RentDebits::default();
for (pubkey, mut account) in accounts {
let rent = self.rent_collector.collect_from_existing_account(
&pubkey,
&mut account,
rent_for_sysvars,
self.rc.accounts.accounts_db.filler_account_suffix.as_ref(),
);
total_rent += rent;
// Store all of them unconditionally to purge old AppendVec,
// even if collected rent is 0 (= not updated).
// Also, there's another subtle side-effect from this: this
// ensures we verify the whole on-chain state (= all accounts)
            // via the account delta hash, slowly, once per epoch.
self.store_account(&pubkey, &account);
rent_debits.insert(&pubkey, rent, account.lamports());
}
self.collected_rent.fetch_add(total_rent, Relaxed);
self.rewards
.write()
.unwrap()
.extend(rent_debits.into_unordered_rewards_iter());
self.rc
.accounts
.hold_range_in_memory(&subrange, false, thread_pool);
account_count
}
// Mostly, the pair (start_index & end_index) is equivalent to this range:
// start_index..=end_index. But it has some exceptional cases, including
// this important and valid one:
// 0..=0: the first partition in the new epoch when crossing epochs
pub fn pubkey_range_from_partition(
(start_index, end_index, partition_count): Partition,
) -> RangeInclusive<Pubkey> {
assert!(start_index <= end_index);
assert!(start_index < partition_count);
assert!(end_index < partition_count);
assert!(0 < partition_count);
type Prefix = u64;
const PREFIX_SIZE: usize = mem::size_of::<Prefix>();
const PREFIX_MAX: Prefix = Prefix::max_value();
let mut start_pubkey = [0x00u8; 32];
let mut end_pubkey = [0xffu8; 32];
if partition_count == 1 {
assert_eq!(start_index, 0);
assert_eq!(end_index, 0);
return Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey);
}
// not-overflowing way of `(Prefix::max_value() + 1) / partition_count`
let partition_width = (PREFIX_MAX - partition_count + 1) / partition_count + 1;
let mut start_key_prefix = if start_index == 0 && end_index == 0 {
0
} else if start_index + 1 == partition_count {
PREFIX_MAX
} else {
(start_index + 1) * partition_width
};
let mut end_key_prefix = if end_index + 1 == partition_count {
PREFIX_MAX
} else {
(end_index + 1) * partition_width - 1
};
if start_index != 0 && start_index == end_index {
// n..=n (n != 0): a noop pair across epochs without a gap under
// multi_epoch_cycle, just nullify it.
if end_key_prefix == PREFIX_MAX {
start_key_prefix = end_key_prefix;
start_pubkey = end_pubkey;
} else {
end_key_prefix = start_key_prefix;
end_pubkey = start_pubkey;
}
}
start_pubkey[0..PREFIX_SIZE].copy_from_slice(&start_key_prefix.to_be_bytes());
end_pubkey[0..PREFIX_SIZE].copy_from_slice(&end_key_prefix.to_be_bytes());
trace!(
"pubkey_range_from_partition: ({}-{})/{} [{}]: {}-{}",
start_index,
end_index,
partition_count,
(end_key_prefix - start_key_prefix),
start_pubkey.iter().map(|x| format!("{:02x}", x)).join(""),
end_pubkey.iter().map(|x| format!("{:02x}", x)).join(""),
);
// should be an inclusive range (a closed interval) like this:
// [0xgg00-0xhhff], [0xii00-0xjjff], ... (where 0xii00 == 0xhhff + 1)
Pubkey::new_from_array(start_pubkey)..=Pubkey::new_from_array(end_pubkey)
}
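    // Worked sketch (test-only, illustrative values): with partition_count = 4,
    // partition_width = (u64::MAX - 4 + 1) / 4 + 1 = 2^62, so the special first
    // partition (0, 0, 4) covers pubkey prefixes [0, 2^62 - 1], i.e. pubkeys
    // 0x0000..00 through 0x3fff..ff.
    #[cfg(test)]
    #[allow(dead_code)]
    fn demo_pubkey_range_first_partition() -> RangeInclusive<Pubkey> {
        Self::pubkey_range_from_partition((0, 0, 4))
    }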
pub fn get_partitions(
slot: Slot,
parent_slot: Slot,
slot_count_in_two_day: SlotCount,
) -> Vec<Partition> {
let parent_cycle = parent_slot / slot_count_in_two_day;
let current_cycle = slot / slot_count_in_two_day;
let mut parent_cycle_index = parent_slot % slot_count_in_two_day;
let current_cycle_index = slot % slot_count_in_two_day;
let mut partitions = vec![];
if parent_cycle < current_cycle {
if current_cycle_index > 0 {
// generate and push gapped partitions because some slots are skipped
let parent_last_cycle_index = slot_count_in_two_day - 1;
// ... for parent cycle
partitions.push((
parent_cycle_index,
parent_last_cycle_index,
slot_count_in_two_day,
));
// ... for current cycle
partitions.push((0, 0, slot_count_in_two_day));
}
parent_cycle_index = 0;
}
partitions.push((
parent_cycle_index,
current_cycle_index,
slot_count_in_two_day,
));
partitions
}
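    // Sketch (test-only, illustrative values): crossing a cycle boundary while
    // slots were skipped yields three partitions: the tail of the parent cycle,
    // the special (0, 0) partition of the new cycle, and the normal partition.
    #[cfg(test)]
    #[allow(dead_code)]
    fn demo_get_partitions() -> Vec<Partition> {
        // parent at slot 9 (cycle 0, index 9), current at slot 12 (cycle 1,
        // index 2), with a 10-slot cycle:
        // => vec![(9, 9, 10), (0, 0, 10), (0, 2, 10)]
        Self::get_partitions(12, 9, 10)
    }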
fn fixed_cycle_partitions(&self) -> Vec<Partition> {
let slot_count_in_two_day = self.slot_count_in_two_day();
Self::get_partitions(self.slot(), self.parent_slot(), slot_count_in_two_day)
}
/// used only by filler accounts in debug path
/// previous means slot - 1, not parent
pub fn variable_cycle_partition_from_previous_slot(
epoch_schedule: &EpochSchedule,
slot: Slot,
) -> Partition {
// similar code to Bank::variable_cycle_partitions
let (current_epoch, current_slot_index) = epoch_schedule.get_epoch_and_slot_index(slot);
let (parent_epoch, mut parent_slot_index) =
epoch_schedule.get_epoch_and_slot_index(slot.saturating_sub(1));
let cycle_params = Self::rent_single_epoch_collection_cycle_params(
current_epoch,
epoch_schedule.get_slots_in_epoch(current_epoch),
);
if parent_epoch < current_epoch {
parent_slot_index = 0;
}
let generated_for_gapped_epochs = false;
Self::get_partition_from_slot_indexes(
cycle_params,
parent_slot_index,
current_slot_index,
generated_for_gapped_epochs,
)
}
fn variable_cycle_partitions(&self) -> Vec<Partition> {
let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot());
let (parent_epoch, mut parent_slot_index) =
self.get_epoch_and_slot_index(self.parent_slot());
let mut partitions = vec![];
if parent_epoch < current_epoch {
let slot_skipped = (self.slot() - self.parent_slot()) > 1;
if slot_skipped {
// Generate special partitions because there are skipped slots
// exactly at the epoch transition.
let parent_last_slot_index = self.get_slots_in_epoch(parent_epoch) - 1;
// ... for parent epoch
partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
parent_slot_index,
parent_last_slot_index,
parent_epoch,
));
if current_slot_index > 0 {
// ... for current epoch
partitions.push(self.partition_from_slot_indexes_with_gapped_epochs(
0,
0,
current_epoch,
));
}
}
parent_slot_index = 0;
}
partitions.push(self.partition_from_normal_slot_indexes(
parent_slot_index,
current_slot_index,
current_epoch,
));
partitions
}
fn do_partition_from_slot_indexes(
&self,
start_slot_index: SlotIndex,
end_slot_index: SlotIndex,
epoch: Epoch,
generated_for_gapped_epochs: bool,
) -> Partition {
let cycle_params = self.determine_collection_cycle_params(epoch);
Self::get_partition_from_slot_indexes(
cycle_params,
start_slot_index,
end_slot_index,
generated_for_gapped_epochs,
)
}
fn get_partition_from_slot_indexes(
cycle_params: RentCollectionCycleParams,
start_slot_index: SlotIndex,
end_slot_index: SlotIndex,
generated_for_gapped_epochs: bool,
) -> Partition {
let (_, _, in_multi_epoch_cycle, _, _, partition_count) = cycle_params;
        // Use a common codepath for both the very likely and the very unlikely case, to
        // minimize the risk of any miscalculation, rather than a negligibly faster per-slot
        // computation for the likely case.
let mut start_partition_index =
Self::partition_index_from_slot_index(start_slot_index, cycle_params);
let mut end_partition_index =
Self::partition_index_from_slot_index(end_slot_index, cycle_params);
// Adjust partition index for some edge cases
let is_special_new_epoch = start_slot_index == 0 && end_slot_index != 1;
let in_middle_of_cycle = start_partition_index > 0;
if in_multi_epoch_cycle && is_special_new_epoch && in_middle_of_cycle {
// Adjust slot indexes so that the final partition ranges are continuous!
            // This is needed because the caller gives us off-by-one indexes when
// an epoch boundary is crossed.
// Usually there is no need for this adjustment because cycles are aligned
// with epochs. But for multi-epoch cycles, adjust the indexes if it
// happens in the middle of a cycle for both gapped and not-gapped cases:
//
// epoch (slot range)|slot idx.*1|raw part. idx.|adj. part. idx.|epoch boundary
// ------------------+-----------+--------------+---------------+--------------
// 3 (20..30) | [7..8] | 7.. 8 | 7.. 8
// | [8..9] | 8.. 9 | 8.. 9
// 4 (30..40) | [0..0] |<10>..10 | <9>..10 <--- not gapped
// | [0..1] | 10..11 | 10..12
// | [1..2] | 11..12 | 11..12
// | [2..9 *2| 12..19 | 12..19 <-+
// 5 (40..50) | 0..0 *2|<20>..<20> |<19>..<19> *3 <-+- gapped
// | 0..4] |<20>..24 |<19>..24 <-+
// | [4..5] | 24..25 | 24..25
// | [5..6] | 25..26 | 25..26
//
// NOTE: <..> means the adjusted slots
//
// *1: The range of parent_bank.slot() and current_bank.slot() is firstly
// split by the epoch boundaries and then the split ones are given to us.
// The original ranges are denoted as [...]
// *2: These are marked with generated_for_gapped_epochs = true.
// *3: This becomes no-op partition
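            //
            // Worked reading of one row above: the not-gapped epoch-4 split
            // [0..0] has raw partition indexes 10..10; decrementing only the
            // start index gives the adjusted 9..10, restoring continuity with
            // epoch 3's final partition 8..9.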
start_partition_index -= 1;
if generated_for_gapped_epochs {
assert_eq!(start_slot_index, end_slot_index);
end_partition_index -= 1;
}
}
(start_partition_index, end_partition_index, partition_count)
}
fn partition_from_normal_slot_indexes(
&self,
start_slot_index: SlotIndex,
end_slot_index: SlotIndex,
epoch: Epoch,
) -> Partition {
self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, false)
}
fn partition_from_slot_indexes_with_gapped_epochs(
&self,
start_slot_index: SlotIndex,
end_slot_index: SlotIndex,
epoch: Epoch,
) -> Partition {
self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, true)
}
fn rent_single_epoch_collection_cycle_params(
epoch: Epoch,
slot_count_per_epoch: SlotCount,
) -> RentCollectionCycleParams {
(
epoch,
slot_count_per_epoch,
false,
0,
1,
slot_count_per_epoch,
)
}
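    // RentCollectionCycleParams fields, in order: (epoch, slot_count_per_epoch,
    // in_multi_epoch_cycle, base_epoch, epoch_count_per_cycle, partition_count).
    // For a single-epoch cycle there is exactly one epoch per cycle, so the
    // partition count equals the slot count of the epoch.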
fn determine_collection_cycle_params(&self, epoch: Epoch) -> RentCollectionCycleParams {
let slot_count_per_epoch = self.get_slots_in_epoch(epoch);
if !self.use_multi_epoch_collection_cycle(epoch) {
// mnb should always go through this code path
Self::rent_single_epoch_collection_cycle_params(epoch, slot_count_per_epoch)
} else {
let epoch_count_in_cycle = self.slot_count_in_two_day() / slot_count_per_epoch;
let partition_count = slot_count_per_epoch * epoch_count_in_cycle;
(
epoch,
slot_count_per_epoch,
true,
self.first_normal_epoch(),
epoch_count_in_cycle,
partition_count,
)
}
}
fn partition_index_from_slot_index(
slot_index_in_epoch: SlotIndex,
(
epoch,
slot_count_per_epoch,
_,
base_epoch,
epoch_count_per_cycle,
_,
): RentCollectionCycleParams,
) -> PartitionIndex {
let epoch_offset = epoch - base_epoch;
let epoch_index_in_cycle = epoch_offset % epoch_count_per_cycle;
slot_index_in_epoch + epoch_index_in_cycle * slot_count_per_epoch
}
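    // Worked sketch (illustrative values): with base_epoch = 10,
    // epoch_count_per_cycle = 4 and slot_count_per_epoch = 32, epoch 13 at
    // slot index 5 maps to partition index 5 + ((13 - 10) % 4) * 32 = 101.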
// Given short epochs, it's too costly to collect rent eagerly
// within an epoch, so lower the frequency of it.
    // This logic isn't strictly eager anymore and should only be used
// for development/performance purpose.
// Absolutely not under ClusterType::MainnetBeta!!!!
fn use_multi_epoch_collection_cycle(&self, epoch: Epoch) -> bool {
// Force normal behavior, disabling multi epoch collection cycle for manual local testing
#[cfg(not(test))]
if self.slot_count_per_normal_epoch() == solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
{
return false;
}
epoch >= self.first_normal_epoch()
&& self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
}
fn use_fixed_collection_cycle(&self) -> bool {
// Force normal behavior, disabling fixed collection cycle for manual local testing
#[cfg(not(test))]
if self.slot_count_per_normal_epoch() == solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
{
return false;
}
self.cluster_type() != ClusterType::MainnetBeta
&& self.slot_count_per_normal_epoch() < self.slot_count_in_two_day()
}
fn slot_count_in_two_day(&self) -> SlotCount {
Self::slot_count_in_two_day_helper(self.ticks_per_slot)
}
// This value is specially chosen to align with slots per epoch in mainnet-beta and testnet
// Also, assume 500GB account data set as the extreme, then for 2 day (=48 hours) to collect
// rent eagerly, we'll consume 5.7 MB/s IO bandwidth, bidirectionally.
pub fn slot_count_in_two_day_helper(ticks_per_slot: SlotCount) -> SlotCount {
2 * DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY / ticks_per_slot
}
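    // Worked sketch: assuming the sdk defaults of 160 ticks per second and 64
    // ticks per slot, this is 2 * 160 * 86_400 / 64 = 432_000 slots, matching
    // the mainnet-beta/testnet epoch length mentioned above.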
fn slot_count_per_normal_epoch(&self) -> SlotCount {
self.get_slots_in_epoch(self.first_normal_epoch())
}
pub fn cluster_type(&self) -> ClusterType {
// unwrap is safe; self.cluster_type is ensured to be Some() always...
        // we're only using Option here for ABI compatibility...
self.cluster_type.unwrap()
}
/// Process a batch of transactions.
#[must_use]
pub fn load_execute_and_commit_transactions(
&self,
batch: &TransactionBatch,
max_age: usize,
collect_balances: bool,
enable_cpi_recording: bool,
enable_log_recording: bool,
timings: &mut ExecuteTimings,
) -> (
TransactionResults,
TransactionBalancesSet,
Vec<Option<InnerInstructionsList>>,
Vec<Option<TransactionLogMessages>>,
) {
let pre_balances = if collect_balances {
self.collect_balances(batch)
} else {
vec![]
};
let (
mut loaded_txs,
executed,
inner_instructions,
transaction_logs,
_,
tx_count,
signature_count,
) = self.load_and_execute_transactions(
batch,
max_age,
enable_cpi_recording,
enable_log_recording,
timings,
);
let results = self.commit_transactions(
batch.sanitized_transactions(),
&mut loaded_txs,
&executed,
tx_count,
signature_count,
timings,
);
let post_balances = if collect_balances {
self.collect_balances(batch)
} else {
vec![]
};
(
results,
TransactionBalancesSet::new(pre_balances, post_balances),
inner_instructions,
transaction_logs,
)
}
/// Process a Transaction. This is used for unit tests and simply calls the vector
/// Bank::process_transactions method.
pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
self.try_process_transactions(std::iter::once(tx))?[0].clone()?;
tx.signatures
.get(0)
.map_or(Ok(()), |sig| self.get_signature_status(sig).unwrap())
}
/// Process multiple transaction in a single batch. This is used for benches and unit tests.
///
/// # Panics
///
/// Panics if any of the transactions do not pass sanitization checks.
#[must_use]
pub fn process_transactions<'a>(
&self,
txs: impl Iterator<Item = &'a Transaction>,
) -> Vec<Result<()>> {
self.try_process_transactions(txs).unwrap()
}
/// Process multiple transaction in a single batch. This is used for benches and unit tests.
/// Short circuits if any of the transactions do not pass sanitization checks.
pub fn try_process_transactions<'a>(
&self,
txs: impl Iterator<Item = &'a Transaction>,
) -> Result<Vec<Result<()>>> {
let txs = txs
.map(|tx| VersionedTransaction::from(tx.clone()))
.collect();
self.try_process_entry_transactions(txs)
}
/// Process entry transactions in a single batch. This is used for benches and unit tests.
///
/// # Panics
///
/// Panics if any of the transactions do not pass sanitization checks.
#[must_use]
pub fn process_entry_transactions(&self, txs: Vec<VersionedTransaction>) -> Vec<Result<()>> {
self.try_process_entry_transactions(txs).unwrap()
}
/// Process multiple transaction in a single batch. This is used for benches and unit tests.
/// Short circuits if any of the transactions do not pass sanitization checks.
pub fn try_process_entry_transactions(
&self,
txs: Vec<VersionedTransaction>,
) -> Result<Vec<Result<()>>> {
let batch = self.prepare_entry_batch(txs)?;
Ok(self.process_transaction_batch(&batch))
}
#[must_use]
fn process_transaction_batch(&self, batch: &TransactionBatch) -> Vec<Result<()>> {
self.load_execute_and_commit_transactions(
batch,
MAX_PROCESSING_AGE,
false,
false,
false,
&mut ExecuteTimings::default(),
)
.0
.fee_collection_results
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` lamports where `blockhash` is the last Entry ID observed by the client.
pub fn transfer(&self, n: u64, keypair: &Keypair, to: &Pubkey) -> Result<Signature> {
let blockhash = self.last_blockhash();
let tx = system_transaction::transfer(keypair, to, n, blockhash);
let signature = tx.signatures[0];
self.process_transaction(&tx).map(|_| signature)
}
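    // Usage sketch (illustrative; `genesis_config` and `mint_keypair` are
    // assumed test fixtures, e.g. from create_genesis_config()):
    //
    //     let bank = Bank::new_for_tests(&genesis_config);
    //     let recipient = Pubkey::new_unique();
    //     bank.transfer(1, &mint_keypair, &recipient).unwrap();
    //     assert_eq!(bank.get_balance(&recipient), 1);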
pub fn read_balance(account: &AccountSharedData) -> u64 {
account.lamports()
}
    /// Each program would need to be able to introspect its own state;
    /// this is hard-coded to the Budget language
pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
self.get_account(pubkey)
.map(|x| Self::read_balance(&x))
.unwrap_or(0)
}
/// Compute all the parents of the bank in order
pub fn parents(&self) -> Vec<Arc<Bank>> {
let mut parents = vec![];
let mut bank = self.parent();
while let Some(parent) = bank {
parents.push(parent.clone());
bank = parent.parent();
}
parents
}
/// Compute all the parents of the bank including this bank itself
pub fn parents_inclusive(self: Arc<Self>) -> Vec<Arc<Bank>> {
let mut parents = self.parents();
parents.insert(0, self);
parents
}
pub fn store_account(&self, pubkey: &Pubkey, account: &AccountSharedData) {
assert!(!self.freeze_started());
self.rc
.accounts
.store_slow_cached(self.slot(), pubkey, account);
self.stakes_cache.check_and_store(pubkey, account);
}
pub fn force_flush_accounts_cache(&self) {
self.rc
.accounts
.accounts_db
.flush_accounts_cache(true, Some(self.slot()))
}
pub fn flush_accounts_cache_if_needed(&self) {
self.rc
.accounts
.accounts_db
.flush_accounts_cache(false, Some(self.slot()))
}
#[cfg(test)]
pub fn flush_accounts_cache_slot(&self) {
self.rc
.accounts
.accounts_db
.flush_accounts_cache_slot(self.slot())
}
pub fn expire_old_recycle_stores(&self) {
self.rc.accounts.accounts_db.expire_old_recycle_stores()
}
/// Technically this issues (or even burns!) new lamports,
    /// so be extra careful with its usage
fn store_account_and_update_capitalization(
&self,
pubkey: &Pubkey,
new_account: &AccountSharedData,
) {
if let Some(old_account) = self.get_account_with_fixed_root(pubkey) {
match new_account.lamports().cmp(&old_account.lamports()) {
std::cmp::Ordering::Greater => {
let increased = new_account.lamports() - old_account.lamports();
trace!(
"store_account_and_update_capitalization: increased: {} {}",
pubkey,
increased
);
self.capitalization.fetch_add(increased, Relaxed);
}
std::cmp::Ordering::Less => {
let decreased = old_account.lamports() - new_account.lamports();
trace!(
"store_account_and_update_capitalization: decreased: {} {}",
pubkey,
decreased
);
self.capitalization.fetch_sub(decreased, Relaxed);
}
std::cmp::Ordering::Equal => {}
}
} else {
trace!(
"store_account_and_update_capitalization: created: {} {}",
pubkey,
new_account.lamports()
);
self.capitalization
.fetch_add(new_account.lamports(), Relaxed);
}
self.store_account(pubkey, new_account);
}
fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> {
match self.get_account_with_fixed_root(pubkey) {
Some(mut account) => {
let min_balance = match get_system_account_kind(&account) {
Some(SystemAccountKind::Nonce) => self
.rent_collector
.rent
.minimum_balance(nonce::State::size()),
_ => 0,
};
lamports
.checked_add(min_balance)
.filter(|required_balance| *required_balance <= account.lamports())
.ok_or(TransactionError::InsufficientFundsForFee)?;
account
.checked_sub_lamports(lamports)
.map_err(|_| TransactionError::InsufficientFundsForFee)?;
self.store_account(pubkey, &account);
Ok(())
}
None => Err(TransactionError::AccountNotFound),
}
}
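    // e.g. withdrawing from a nonce account: the remaining balance must stay at
    // or above the rent-exempt minimum for nonce::State::size() bytes, otherwise
    // InsufficientFundsForFee is returned before any lamports move.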
pub fn deposit(
&self,
pubkey: &Pubkey,
lamports: u64,
) -> std::result::Result<u64, LamportsError> {
        // This intentionally doesn't collect rent.
        // Rent should only be applied to actual transactions.
let mut account = self.get_account_with_fixed_root(pubkey).unwrap_or_default();
account.checked_add_lamports(lamports)?;
self.store_account(pubkey, &account);
Ok(account.lamports())
}
pub fn accounts(&self) -> Arc<Accounts> {
self.rc.accounts.clone()
}
fn finish_init(
&mut self,
genesis_config: &GenesisConfig,
additional_builtins: Option<&Builtins>,
debug_do_not_add_builtins: bool,
) {
self.rewards_pool_pubkeys =
Arc::new(genesis_config.rewards_pools.keys().cloned().collect());
let mut builtins = builtins::get();
if let Some(additional_builtins) = additional_builtins {
builtins
.genesis_builtins
.extend_from_slice(&additional_builtins.genesis_builtins);
builtins
.feature_builtins
.extend_from_slice(&additional_builtins.feature_builtins);
}
if !debug_do_not_add_builtins {
for builtin in builtins.genesis_builtins {
self.add_builtin(
&builtin.name,
&builtin.id,
builtin.process_instruction_with_context,
);
}
for precompile in get_precompiles() {
if precompile.feature.is_none() {
self.add_precompile(&precompile.program_id);
}
}
}
self.feature_builtins = Arc::new(builtins.feature_builtins);
self.apply_feature_activations(true, debug_do_not_add_builtins);
}
pub fn set_inflation(&self, inflation: Inflation) {
*self.inflation.write().unwrap() = inflation;
}
pub fn set_compute_budget(&mut self, compute_budget: Option<ComputeBudget>) {
self.compute_budget = compute_budget;
}
pub fn hard_forks(&self) -> Arc<RwLock<HardForks>> {
self.hard_forks.clone()
}
// Hi! leaky abstraction here....
// try to use get_account_with_fixed_root() if it's called ONLY from on-chain runtime account
// processing. That alternative fn provides more safety.
pub fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
self.get_account_modified_slot(pubkey)
.map(|(acc, _slot)| acc)
}
// Hi! leaky abstraction here....
// use this over get_account() if it's called ONLY from on-chain runtime account
// processing (i.e. from in-band replay/banking stage; that ensures root is *fixed* while
// running).
// pro: safer assertion can be enabled inside AccountsDb
// con: panics!() if called from off-chain processing
pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
self.load_slow_with_fixed_root(&self.ancestors, pubkey)
.map(|(acc, _slot)| acc)
}
pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> {
self.load_slow(&self.ancestors, pubkey)
}
fn load_slow(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
        // get_account (= the primary caller of this fn) may be called from on-chain Bank code
        // even if we try hard to use get_account_with_fixed_root for that purpose...
        // so pass the safer LoadHint::Unspecified here as a fallback
self.rc.accounts.load_without_fixed_root(ancestors, pubkey)
}
fn load_slow_with_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.rc.accounts.load_with_fixed_root(ancestors, pubkey)
}
// Exclude self to really fetch the parent Bank's account hash and data.
//
// Being idempotent is needed to make the lazy initialization possible,
// especially for update_slot_hashes at the moment, which can be called
// multiple times with the same parent_slot in the case of forking.
//
// Generally, all of sysvar update granularity should be slot boundaries.
//
// This behavior is deprecated... See comment in update_sysvar_account() for details
fn get_sysvar_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
let mut ancestors = self.ancestors.clone();
ancestors.remove(&self.slot());
self.rc
.accounts
.load_with_fixed_root(&ancestors, pubkey)
.map(|(acc, _slot)| acc)
}
pub fn get_program_accounts(
&self,
program_id: &Pubkey,
config: &ScanConfig,
) -> ScanResult<Vec<TransactionAccount>> {
self.rc
.accounts
.load_by_program(&self.ancestors, self.bank_id, program_id, config)
}
pub fn get_filtered_program_accounts<F: Fn(&AccountSharedData) -> bool>(
&self,
program_id: &Pubkey,
filter: F,
config: &ScanConfig,
) -> ScanResult<Vec<TransactionAccount>> {
self.rc.accounts.load_by_program_with_filter(
&self.ancestors,
self.bank_id,
program_id,
filter,
config,
)
}
pub fn get_filtered_indexed_accounts<F: Fn(&AccountSharedData) -> bool>(
&self,
index_key: &IndexKey,
filter: F,
config: &ScanConfig,
byte_limit_for_scan: Option<usize>,
) -> ScanResult<Vec<TransactionAccount>> {
self.rc.accounts.load_by_index_key_with_filter(
&self.ancestors,
self.bank_id,
index_key,
filter,
config,
byte_limit_for_scan,
)
}
pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool {
self.rc.accounts.account_indexes_include_key(key)
}
pub fn get_all_accounts_with_modified_slots(
&self,
) -> ScanResult<Vec<(Pubkey, AccountSharedData, Slot)>> {
self.rc.accounts.load_all(&self.ancestors, self.bank_id)
}
pub fn get_program_accounts_modified_since_parent(
&self,
program_id: &Pubkey,
) -> Vec<TransactionAccount> {
self.rc
.accounts
.load_by_program_slot(self.slot(), Some(program_id))
}
pub fn get_transaction_logs(
&self,
address: Option<&Pubkey>,
) -> Option<Vec<TransactionLogInfo>> {
self.transaction_log_collector
.read()
.unwrap()
.get_logs_for_address(address)
}
pub fn get_all_accounts_modified_since_parent(&self) -> Vec<TransactionAccount> {
self.rc.accounts.load_by_program_slot(self.slot(), None)
}
    // if you want get_account_modified_since_parent without fixed_root, please define one...
fn get_account_modified_since_parent_with_fixed_root(
&self,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
let just_self: Ancestors = Ancestors::from(vec![self.slot()]);
if let Some((account, slot)) = self.load_slow_with_fixed_root(&just_self, pubkey) {
if slot == self.slot() {
return Some((account, slot));
}
}
None
}
pub fn get_largest_accounts(
&self,
num: usize,
filter_by_address: &HashSet<Pubkey>,
filter: AccountAddressFilter,
) -> ScanResult<Vec<(Pubkey, u64)>> {
self.rc.accounts.load_largest_accounts(
&self.ancestors,
self.bank_id,
num,
filter_by_address,
filter,
)
}
pub fn transaction_count(&self) -> u64 {
self.transaction_count.load(Relaxed)
}
pub fn transaction_error_count(&self) -> u64 {
self.transaction_error_count.load(Relaxed)
}
pub fn transaction_entries_count(&self) -> u64 {
self.transaction_entries_count.load(Relaxed)
}
pub fn transactions_per_entry_max(&self) -> u64 {
self.transactions_per_entry_max.load(Relaxed)
}
fn increment_transaction_count(&self, tx_count: u64) {
self.transaction_count.fetch_add(tx_count, Relaxed);
}
pub fn signature_count(&self) -> u64 {
self.signature_count.load(Relaxed)
}
fn increment_signature_count(&self, signature_count: u64) {
self.signature_count.fetch_add(signature_count, Relaxed);
}
pub fn get_signature_status_processed_since_parent(
&self,
signature: &Signature,
) -> Option<Result<()>> {
if let Some((slot, status)) = self.get_signature_status_slot(signature) {
if slot <= self.slot() {
return Some(status);
}
}
None
}
pub fn get_signature_status_with_blockhash(
&self,
signature: &Signature,
blockhash: &Hash,
) -> Option<Result<()>> {
let rcache = self.src.status_cache.read().unwrap();
rcache
.get_status(signature, blockhash, &self.ancestors)
.map(|v| v.1)
}
pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
let rcache = self.src.status_cache.read().unwrap();
rcache.get_status_any_blockhash(signature, &self.ancestors)
}
pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
self.get_signature_status_slot(signature).map(|v| v.1)
}
pub fn has_signature(&self, signature: &Signature) -> bool {
self.get_signature_status_slot(signature).is_some()
}
/// Hash the `accounts` HashMap. This represents a validator's interpretation
/// of the delta of the ledger since the last vote and up to now
fn hash_internal_state(&self) -> Hash {
// If there are no accounts, return the hash of the previous state and the latest blockhash
let accounts_delta_hash = self.rc.accounts.bank_hash_info_at(self.slot());
let mut signature_count_buf = [0u8; 8];
        LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count());
let mut hash = hashv(&[
self.parent_hash.as_ref(),
accounts_delta_hash.hash.as_ref(),
&signature_count_buf,
self.last_blockhash().as_ref(),
]);
if let Some(buf) = self
.hard_forks
.read()
.unwrap()
.get_hash_data(self.slot(), self.parent_slot())
{
info!("hard fork at bank {}", self.slot());
hash = extend_and_hash(&hash, &buf)
}
info!(
"bank frozen: {} hash: {} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}",
self.slot(),
hash,
accounts_delta_hash.hash,
self.signature_count(),
self.last_blockhash(),
self.capitalization(),
);
info!(
"accounts hash slot: {} stats: {:?}",
self.slot(),
accounts_delta_hash.stats,
);
hash
}
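    // Sketch of the digest computed above:
    //   bank_hash = hash(parent_hash, accounts_delta_hash, signature_count_le, last_blockhash)
    // optionally extended with any hard-fork data registered for this slot.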
/// Recalculate the hash_internal_state from the account stores. Would be used to verify a
/// snapshot.
/// Only called from startup or test code.
#[must_use]
fn verify_bank_hash(&self, test_hash_calculation: bool) -> bool {
self.rc.accounts.verify_bank_hash_and_lamports(
self.slot(),
&self.ancestors,
self.capitalization(),
test_hash_calculation,
)
}
pub fn get_snapshot_storages(&self, base_slot: Option<Slot>) -> SnapshotStorages {
self.rc
.accounts
.accounts_db
.get_snapshot_storages(self.slot(), base_slot, None)
.0
}
#[must_use]
fn verify_hash(&self) -> bool {
assert!(self.is_frozen());
let calculated_hash = self.hash_internal_state();
let expected_hash = self.hash();
if calculated_hash == expected_hash {
true
} else {
warn!(
"verify failed: slot: {}, {} (calculated) != {} (expected)",
self.slot(),
calculated_hash,
expected_hash
);
false
}
}
pub fn verify_transaction(
&self,
tx: VersionedTransaction,
verification_mode: TransactionVerificationMode,
) -> Result<SanitizedTransaction> {
let sanitized_tx = {
let size =
bincode::serialized_size(&tx).map_err(|_| TransactionError::SanitizeFailure)?;
if size > PACKET_DATA_SIZE as u64 {
return Err(TransactionError::SanitizeFailure);
}
let message_hash = if verification_mode == TransactionVerificationMode::FullVerification
{
tx.verify_and_hash_message()?
} else {
tx.message.hash()
};
SanitizedTransaction::try_create(tx, message_hash, None, |_| {
Err(TransactionError::UnsupportedVersion)
})
}?;
if verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
|| verification_mode == TransactionVerificationMode::FullVerification
{
sanitized_tx.verify_precompiles(&self.feature_set)?;
}
Ok(sanitized_tx)
}
pub fn calculate_capitalization(&self, debug_verify: bool) -> u64 {
let can_cached_slot_be_unflushed = true; // implied yes
self.rc.accounts.calculate_capitalization(
&self.ancestors,
self.slot(),
can_cached_slot_be_unflushed,
debug_verify,
)
}
pub fn calculate_and_verify_capitalization(&self, debug_verify: bool) -> bool {
let calculated = self.calculate_capitalization(debug_verify);
let expected = self.capitalization();
if calculated == expected {
true
} else {
warn!(
"Capitalization mismatch: calculated: {} != expected: {}",
calculated, expected
);
false
}
}
/// Forcibly overwrites current capitalization by actually recalculating accounts' balances.
/// This should only be used for developing purposes.
pub fn set_capitalization(&self) -> u64 {
let old = self.capitalization();
let debug_verify = true;
self.capitalization
.store(self.calculate_capitalization(debug_verify), Relaxed);
old
}
pub fn get_accounts_hash(&self) -> Hash {
self.rc.accounts.accounts_db.get_accounts_hash(self.slot)
}
pub fn get_thread_pool(&self) -> &ThreadPool {
&self.rc.accounts.accounts_db.thread_pool_clean
}
pub fn update_accounts_hash_with_index_option(
&self,
use_index: bool,
mut debug_verify: bool,
slots_per_epoch: Option<Slot>,
is_startup: bool,
) -> Hash {
let (hash, total_lamports) = self
.rc
.accounts
.accounts_db
.update_accounts_hash_with_index_option(
use_index,
debug_verify,
self.slot(),
&self.ancestors,
Some(self.capitalization()),
false,
slots_per_epoch,
is_startup,
);
if total_lamports != self.capitalization() {
datapoint_info!(
"capitalization_mismatch",
("slot", self.slot(), i64),
("calculated_lamports", total_lamports, i64),
("capitalization", self.capitalization(), i64),
);
if !debug_verify {
// cap mismatch detected. It has been logged to metrics above.
// Run both versions of the calculation to attempt to get more info.
debug_verify = true;
self.rc
.accounts
.accounts_db
.update_accounts_hash_with_index_option(
use_index,
debug_verify,
self.slot(),
&self.ancestors,
Some(self.capitalization()),
false,
slots_per_epoch,
is_startup,
);
}
panic!(
"capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}",
self.slot(),
total_lamports,
self.capitalization()
);
}
hash
}
pub fn update_accounts_hash(&self) -> Hash {
self.update_accounts_hash_with_index_option(true, false, None, false)
}
/// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash
/// calculation and could shield other real accounts.
pub fn verify_snapshot_bank(
&self,
test_hash_calculation: bool,
accounts_db_skip_shrink: bool,
last_full_snapshot_slot: Option<Slot>,
) -> bool {
let mut clean_time = Measure::start("clean");
if !accounts_db_skip_shrink && self.slot() > 0 {
info!("cleaning..");
self.clean_accounts(true, true, last_full_snapshot_slot);
}
clean_time.stop();
self.rc
.accounts
.accounts_db
.accounts_index
.set_startup(true);
let mut shrink_all_slots_time = Measure::start("shrink_all_slots");
if !accounts_db_skip_shrink && self.slot() > 0 {
info!("shrinking..");
self.shrink_all_slots(true, last_full_snapshot_slot);
}
shrink_all_slots_time.stop();
info!("verify_bank_hash..");
let mut verify_time = Measure::start("verify_bank_hash");
let mut verify = self.verify_bank_hash(test_hash_calculation);
verify_time.stop();
self.rc
.accounts
.accounts_db
.accounts_index
.set_startup(false);
info!("verify_hash..");
let mut verify2_time = Measure::start("verify_hash");
// Order and short-circuiting is significant; verify_hash requires a valid bank hash
verify = verify && self.verify_hash();
verify2_time.stop();
datapoint_info!(
"verify_snapshot_bank",
("clean_us", clean_time.as_us(), i64),
("shrink_all_slots_us", shrink_all_slots_time.as_us(), i64),
("verify_bank_hash_us", verify_time.as_us(), i64),
("verify_hash_us", verify2_time.as_us(), i64),
);
verify
}
/// Return the number of hashes per tick
pub fn hashes_per_tick(&self) -> &Option<u64> {
&self.hashes_per_tick
}
/// Return the number of ticks per slot
pub fn ticks_per_slot(&self) -> u64 {
self.ticks_per_slot
}
/// Return the number of slots per year
pub fn slots_per_year(&self) -> f64 {
self.slots_per_year
}
/// Return the number of ticks since genesis.
pub fn tick_height(&self) -> u64 {
self.tick_height.load(Relaxed)
}
/// Return the inflation parameters of the Bank
pub fn inflation(&self) -> Inflation {
*self.inflation.read().unwrap()
}
pub fn rent_collector(&self) -> RentCollector {
self.rent_collector.clone()
}
/// Return the total capitalization of the Bank
pub fn capitalization(&self) -> u64 {
self.capitalization.load(Relaxed)
}
/// Return this bank's max_tick_height
pub fn max_tick_height(&self) -> u64 {
self.max_tick_height
}
/// Return the block_height of this bank
pub fn block_height(&self) -> u64 {
self.block_height
}
/// Return the number of slots per epoch for the given epoch
pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 {
self.epoch_schedule.get_slots_in_epoch(epoch)
}
/// returns the epoch for which this bank's leader_schedule_slot_offset and slot would
/// need to cache leader_schedule
pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch {
self.epoch_schedule.get_leader_schedule_epoch(slot)
}
    /// update the bank-level cache of vote accounts and stake delegation info
fn update_stakes_cache(
&self,
txs: &[SanitizedTransaction],
res: &[TransactionExecutionResult],
loaded_txs: &[TransactionLoadResult],
) {
for (i, ((raccs, _load_nonce), tx)) in loaded_txs.iter().zip(txs).enumerate() {
let (res, _res_nonce) = &res[i];
if res.is_err() || raccs.is_err() {
continue;
}
let message = tx.message();
let loaded_transaction = raccs.as_ref().unwrap();
for (_i, (pubkey, account)) in
(0..message.account_keys_len()).zip(loaded_transaction.accounts.iter())
{
self.stakes_cache.check_and_store(pubkey, account);
}
}
}
pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.stakes_cache.stakes().staked_nodes()
}
/// current vote accounts for this bank along with the stake
/// attributed to each account
pub fn vote_accounts(&self) -> Arc<HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>> {
let stakes = self.stakes_cache.stakes();
Arc::from(stakes.vote_accounts())
}
/// Vote account for the given vote account pubkey along with the stake.
pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<(/*stake:*/ u64, VoteAccount)> {
let stakes = self.stakes_cache.stakes();
stakes.vote_accounts().get(vote_account).cloned()
}
/// Get the EpochStakes for a given epoch
pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&EpochStakes> {
self.epoch_stakes.get(&epoch)
}
pub fn epoch_stakes_map(&self) -> &HashMap<Epoch, EpochStakes> {
&self.epoch_stakes
}
pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
}
/// vote accounts for the specific epoch along with the stake
/// attributed to each account
pub fn epoch_vote_accounts(
&self,
epoch: Epoch,
) -> Option<&HashMap<Pubkey, (u64, VoteAccount)>> {
let epoch_stakes = self.epoch_stakes.get(&epoch)?.stakes();
Some(epoch_stakes.vote_accounts().as_ref())
}
/// Get the fixed authorized voter for the given vote account for the
/// current epoch
pub fn epoch_authorized_voter(&self, vote_account: &Pubkey) -> Option<&Pubkey> {
self.epoch_stakes
.get(&self.epoch)
.expect("Epoch stakes for bank's own epoch must exist")
.epoch_authorized_voters()
.get(vote_account)
}
/// Get the fixed set of vote accounts for the given node id for the
/// current epoch
pub fn epoch_vote_accounts_for_node_id(&self, node_id: &Pubkey) -> Option<&NodeVoteAccounts> {
self.epoch_stakes
.get(&self.epoch)
.expect("Epoch stakes for bank's own epoch must exist")
.node_id_to_vote_accounts()
.get(node_id)
}
/// Get the fixed total stake of all vote accounts for current epoch
pub fn total_epoch_stake(&self) -> u64 {
self.epoch_stakes
.get(&self.epoch)
.expect("Epoch stakes for bank's own epoch must exist")
.total_stake()
}
/// Get the fixed stake of the given vote account for the current epoch
pub fn epoch_vote_account_stake(&self, vote_account: &Pubkey) -> u64 {
*self
.epoch_vote_accounts(self.epoch())
.expect("Bank epoch vote accounts must contain entry for the bank's own epoch")
.get(vote_account)
.map(|(stake, _)| stake)
.unwrap_or(&0)
}
/// given a slot, return the epoch and offset into the epoch this slot falls
/// e.g. with a fixed number for slots_per_epoch, the calculation is simply:
///
/// ( slot/slots_per_epoch, slot % slots_per_epoch )
///
pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, SlotIndex) {
self.epoch_schedule.get_epoch_and_slot_index(slot)
}
pub fn get_epoch_info(&self) -> EpochInfo {
let absolute_slot = self.slot();
let block_height = self.block_height();
let (epoch, slot_index) = self.get_epoch_and_slot_index(absolute_slot);
let slots_in_epoch = self.get_slots_in_epoch(epoch);
let transaction_count = Some(self.transaction_count());
EpochInfo {
epoch,
slot_index,
slots_in_epoch,
absolute_slot,
block_height,
transaction_count,
}
}
pub fn is_empty(&self) -> bool {
!self.is_delta.load(Relaxed)
}
/// Add an instruction processor to intercept instructions before the dynamic loader.
pub fn add_builtin(
&mut self,
name: &str,
program_id: &Pubkey,
process_instruction: ProcessInstructionWithContext,
) {
debug!("Adding program {} under {:?}", name, program_id);
self.add_builtin_account(name, program_id, false);
if let Some(entry) = self
.builtin_programs
.vec
.iter_mut()
.find(|entry| entry.program_id == *program_id)
{
entry.process_instruction = process_instruction;
} else {
self.builtin_programs.vec.push(BuiltinProgram {
program_id: *program_id,
process_instruction,
});
}
debug!("Added program {} under {:?}", name, program_id);
}
/// Replace a builtin instruction processor if it already exists
pub fn replace_builtin(
&mut self,
name: &str,
program_id: &Pubkey,
process_instruction: ProcessInstructionWithContext,
) {
debug!("Replacing program {} under {:?}", name, program_id);
self.add_builtin_account(name, program_id, true);
if let Some(entry) = self
.builtin_programs
.vec
.iter_mut()
.find(|entry| entry.program_id == *program_id)
{
entry.process_instruction = process_instruction;
}
debug!("Replaced program {} under {:?}", name, program_id);
}
/// Remove a builtin instruction processor if it already exists
pub fn remove_builtin(&mut self, name: &str, program_id: &Pubkey) {
debug!("Removing program {} under {:?}", name, program_id);
// Don't remove the account since the bank expects the account state to
// be idempotent
if let Some(position) = self
.builtin_programs
.vec
.iter()
.position(|entry| entry.program_id == *program_id)
{
self.builtin_programs.vec.remove(position);
}
debug!("Removed program {} under {:?}", name, program_id);
}
pub fn add_precompile(&mut self, program_id: &Pubkey) {
debug!("Adding precompiled program {}", program_id);
self.add_precompiled_account(program_id);
debug!("Added precompiled program {:?}", program_id);
}
pub fn clean_accounts(
&self,
skip_last: bool,
is_startup: bool,
last_full_snapshot_slot: Option<Slot>,
) {
// Don't clean the slot we're snapshotting because it may have zero-lamport
// accounts that were included in the bank delta hash when the bank was frozen,
// and if we clean them here, any newly created snapshot's hash for this bank
// may not match the frozen hash.
//
// So when we're snapshotting, set `skip_last` to true so the highest slot to clean is
// lowered by one.
let highest_slot_to_clean = skip_last.then(|| self.slot().saturating_sub(1));
self.rc.accounts.accounts_db.clean_accounts(
highest_slot_to_clean,
is_startup,
last_full_snapshot_slot,
);
}
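    // e.g. when snapshotting the bank at slot 100, skip_last = true makes the
    // highest slot to clean 99, leaving slot 100's zero-lamport accounts intact
    // so the snapshot hash still matches the frozen bank delta hash.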
pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option<Slot>) {
self.rc
.accounts
.accounts_db
.shrink_all_slots(is_startup, last_full_snapshot_slot);
}
pub fn print_accounts_stats(&self) {
self.rc.accounts.accounts_db.print_accounts_stats("");
}
pub fn process_stale_slot_with_budget(
&self,
mut consumed_budget: usize,
budget_recovery_delta: usize,
) -> usize {
if consumed_budget == 0 {
let shrunken_account_count = self.rc.accounts.accounts_db.process_stale_slot_v1();
if shrunken_account_count > 0 {
datapoint_info!(
"stale_slot_shrink",
("accounts", shrunken_account_count, i64)
);
consumed_budget += shrunken_account_count;
}
}
consumed_budget.saturating_sub(budget_recovery_delta)
}
pub fn shrink_candidate_slots(&self) -> usize {
self.rc.accounts.accounts_db.shrink_candidate_slots()
}
pub fn no_overflow_rent_distribution_enabled(&self) -> bool {
self.feature_set
.is_active(&feature_set::no_overflow_rent_distribution::id())
}
pub fn versioned_tx_message_enabled(&self) -> bool {
self.feature_set
.is_active(&feature_set::versioned_tx_message_enabled::id())
}
pub fn stake_program_advance_activating_credits_observed(&self) -> bool {
self.feature_set
.is_active(&feature_set::stake_program_advance_activating_credits_observed::id())
}
pub fn leave_nonce_on_success(&self) -> bool {
self.feature_set
.is_active(&feature_set::leave_nonce_on_success::id())
}
pub fn send_to_tpu_vote_port_enabled(&self) -> bool {
self.feature_set
.is_active(&feature_set::send_to_tpu_vote_port::id())
}
pub fn read_cost_tracker(&self) -> LockResult<RwLockReadGuard<CostTracker>> {
self.cost_tracker.read()
}
pub fn write_cost_tracker(&self) -> LockResult<RwLockWriteGuard<CostTracker>> {
self.cost_tracker.write()
}
    // Check that the wallclock time from bank creation to now has not exceeded
    // the allotted time for transaction processing
pub fn should_bank_still_be_processing_txs(
bank_creation_time: &Instant,
max_tx_ingestion_nanos: u128,
) -> bool {
// Do this check outside of the poh lock, hence not a method on PohRecorder
bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos
}
pub fn deactivate_feature(&mut self, id: &Pubkey) {
let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
feature_set.active.remove(id);
feature_set.inactive.insert(*id);
self.feature_set = Arc::new(feature_set);
}
pub fn activate_feature(&mut self, id: &Pubkey) {
let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
feature_set.inactive.remove(id);
feature_set.active.insert(*id, 0);
self.feature_set = Arc::new(feature_set);
}
pub fn fill_bank_with_ticks(&self) {
let parent_distance = if self.slot() == 0 {
1
} else {
self.slot() - self.parent_slot()
};
for _ in 0..parent_distance {
let last_blockhash = self.last_blockhash();
while self.last_blockhash() == last_blockhash {
self.register_tick(&Hash::new_unique())
}
}
}
// This is called from snapshot restore AND for each epoch boundary
// The entire code path herein must be idempotent
fn apply_feature_activations(
&mut self,
init_finish_or_warp: bool,
debug_do_not_add_builtins: bool,
) {
let new_feature_activations = self.compute_active_feature_set(!init_finish_or_warp);
if new_feature_activations.contains(&feature_set::pico_inflation::id()) {
*self.inflation.write().unwrap() = Inflation::pico();
self.fee_rate_governor.burn_percent = 50; // 50% fee burn
self.rent_collector.rent.burn_percent = 50; // 50% rent burn
}
if !new_feature_activations.is_disjoint(&self.feature_set.full_inflation_features_enabled())
{
*self.inflation.write().unwrap() = Inflation::full();
self.fee_rate_governor.burn_percent = 50; // 50% fee burn
self.rent_collector.rent.burn_percent = 50; // 50% rent burn
}
if new_feature_activations.contains(&feature_set::spl_token_v3_3_0_release::id()) {
self.apply_spl_token_v3_3_0_release();
}
if new_feature_activations.contains(&feature_set::rent_for_sysvars::id()) {
            // When this feature is activated, all existing sysvars immediately become
            // susceptible to rent collection and account data removal, because each of
            // them holds only 1 lamport.
            // So, before any is accessed, reset their balances to be rent-exempt here, at
            // the same time that the perpetual balance adjustment starts in
            // update_sysvar_account().
self.reset_all_sysvar_balances();
}
if !debug_do_not_add_builtins {
self.ensure_feature_builtins(init_finish_or_warp, &new_feature_activations);
self.reconfigure_token2_native_mint();
}
self.ensure_no_storage_rewards_pool();
}
fn reset_all_sysvar_balances(&self) {
for sysvar_id in &[
sysvar::clock::id(),
sysvar::epoch_schedule::id(),
#[allow(deprecated)]
sysvar::fees::id(),
#[allow(deprecated)]
sysvar::recent_blockhashes::id(),
sysvar::rent::id(),
sysvar::rewards::id(),
sysvar::slot_hashes::id(),
sysvar::slot_history::id(),
sysvar::stake_history::id(),
] {
if let Some(mut account) = self.get_account(sysvar_id) {
let (old_data_len, old_lamports) = (account.data().len(), account.lamports());
self.adjust_sysvar_balance_for_rent(&mut account);
info!(
"reset_all_sysvar_balances (slot: {}): {} ({} bytes) is reset from {} to {}",
self.slot(),
sysvar_id,
old_data_len,
old_lamports,
account.lamports()
);
self.store_account_and_update_capitalization(sysvar_id, &account);
}
}
}
fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
account.set_lamports(
self.get_minimum_balance_for_rent_exemption(account.data().len())
.max(account.lamports()),
);
}
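    // e.g. a sysvar left with 1 lamport and 100 bytes of data is topped up to
    // the rent-exempt minimum for 100 bytes, while an account already at or
    // above that minimum is left unchanged (the max() above). Sizes here are
    // illustrative.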
// Compute the active feature set based on the current bank state, and return the set of newly activated features
fn compute_active_feature_set(&mut self, allow_new_activations: bool) -> HashSet<Pubkey> {
let mut active = self.feature_set.active.clone();
let mut inactive = HashSet::new();
let mut newly_activated = HashSet::new();
let slot = self.slot();
for feature_id in &self.feature_set.inactive {
let mut activated = None;
if let Some(mut account) = self.get_account_with_fixed_root(feature_id) {
if let Some(mut feature) = feature::from_account(&account) {
match feature.activated_at {
None => {
if allow_new_activations {
// Feature has been requested, activate it now
feature.activated_at = Some(slot);
if feature::to_account(&feature, &mut account).is_some() {
self.store_account(feature_id, &account);
}
newly_activated.insert(*feature_id);
activated = Some(slot);
info!("Feature {} activated at slot {}", feature_id, slot);
}
}
Some(activation_slot) => {
if slot >= activation_slot {
// Feature is already active
activated = Some(activation_slot);
}
}
}
}
}
if let Some(slot) = activated {
active.insert(*feature_id, slot);
} else {
inactive.insert(*feature_id);
}
}
self.feature_set = Arc::new(FeatureSet { active, inactive });
newly_activated
}
fn ensure_feature_builtins(
&mut self,
init_or_warp: bool,
new_feature_activations: &HashSet<Pubkey>,
) {
let feature_builtins = self.feature_builtins.clone();
for (builtin, feature, activation_type) in feature_builtins.iter() {
let should_populate = init_or_warp && self.feature_set.is_active(feature)
|| !init_or_warp && new_feature_activations.contains(feature);
if should_populate {
match activation_type {
ActivationType::NewProgram => self.add_builtin(
&builtin.name,
&builtin.id,
builtin.process_instruction_with_context,
),
ActivationType::NewVersion => self.replace_builtin(
&builtin.name,
&builtin.id,
builtin.process_instruction_with_context,
),
ActivationType::RemoveProgram => {
self.remove_builtin(&builtin.name, &builtin.id)
}
}
}
}
for precompile in get_precompiles() {
#[allow(clippy::blocks_in_if_conditions)]
if precompile.feature.map_or(false, |ref feature_id| {
self.feature_set.is_active(feature_id)
}) {
self.add_precompile(&precompile.program_id);
}
}
}
fn apply_spl_token_v3_3_0_release(&mut self) {
if let Some(old_account) = self.get_account_with_fixed_root(&inline_spl_token::id()) {
if let Some(new_account) =
self.get_account_with_fixed_root(&inline_spl_token::new_token_program::id())
{
datapoint_info!(
"bank-apply_spl_token_v3_3_0_release",
("slot", self.slot, i64),
);
// Burn lamports in the old token account
self.capitalization
.fetch_sub(old_account.lamports(), Relaxed);
// Transfer new token account to old token account
self.store_account(&inline_spl_token::id(), &new_account);
// Clear new token account
self.store_account(
&inline_spl_token::new_token_program::id(),
&AccountSharedData::default(),
);
self.remove_executor(&inline_spl_token::id());
}
}
}
fn reconfigure_token2_native_mint(&mut self) {
let reconfigure_token2_native_mint = match self.cluster_type() {
ClusterType::Development => true,
ClusterType::Devnet => true,
ClusterType::Testnet => self.epoch() == 93,
ClusterType::MainnetBeta => self.epoch() == 75,
};
if reconfigure_token2_native_mint {
let mut native_mint_account = solana_sdk::account::AccountSharedData::from(Account {
owner: inline_spl_token::id(),
data: inline_spl_token::native_mint::ACCOUNT_DATA.to_vec(),
lamports: sol_to_lamports(1.),
executable: false,
rent_epoch: self.epoch() + 1,
});
// As a workaround for
// https://github.com/solana-labs/solana-program-library/issues/374, ensure that the
// spl-token 2 native mint account is owned by the spl-token 2 program.
let store = if let Some(existing_native_mint_account) =
self.get_account_with_fixed_root(&inline_spl_token::native_mint::id())
{
if existing_native_mint_account.owner() == &solana_sdk::system_program::id() {
native_mint_account.set_lamports(existing_native_mint_account.lamports());
true
} else {
false
}
} else {
self.capitalization
.fetch_add(native_mint_account.lamports(), Relaxed);
true
};
if store {
self.store_account(&inline_spl_token::native_mint::id(), &native_mint_account);
}
}
}
fn ensure_no_storage_rewards_pool(&mut self) {
let purge_window_epoch = match self.cluster_type() {
ClusterType::Development => false,
// never do this for devnet; we're pristine here. :)
ClusterType::Devnet => false,
// schedule to remove at testnet/tds
ClusterType::Testnet => self.epoch() == 93,
// never do this for stable; we're pristine here. :)
ClusterType::MainnetBeta => false,
};
if purge_window_epoch {
for reward_pubkey in self.rewards_pool_pubkeys.iter() {
if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) {
if reward_account.lamports() == u64::MAX {
reward_account.set_lamports(0);
self.store_account(reward_pubkey, &reward_account);
                        // Adjust capitalization... it has been wrapping, reducing the real
                        // capitalization by 1 lamport
                        self.capitalization.fetch_add(1, Relaxed);
                        info!(
                            "purged rewards pool account: {}, new capitalization: {}",
reward_pubkey,
self.capitalization()
);
}
};
}
}
}
fn rent_for_sysvars(&self) -> bool {
self.feature_set
.is_active(&feature_set::rent_for_sysvars::id())
}
/// Get all the accounts for this bank and calculate stats
pub fn get_total_accounts_stats(&self) -> ScanResult<TotalAccountsStats> {
let accounts = self.get_all_accounts_with_modified_slots()?;
Ok(self.calculate_total_accounts_stats(
accounts
.iter()
.map(|(pubkey, account, _slot)| (pubkey, account)),
))
}
/// Given all the accounts for a bank, calculate stats
pub fn calculate_total_accounts_stats<'a>(
&self,
accounts: impl Iterator<Item = (&'a Pubkey, &'a AccountSharedData)>,
) -> TotalAccountsStats {
let rent_collector = self.rent_collector();
let mut total_accounts_stats = TotalAccountsStats::default();
accounts.for_each(|(pubkey, account)| {
let data_len = account.data().len();
total_accounts_stats.num_accounts += 1;
total_accounts_stats.data_len += data_len;
if account.executable() {
total_accounts_stats.num_executable_accounts += 1;
total_accounts_stats.executable_data_len += data_len;
}
if !rent_collector.should_collect_rent(pubkey, account, false)
|| rent_collector.get_rent_due(account).1
{
total_accounts_stats.num_rent_exempt_accounts += 1;
} else {
total_accounts_stats.num_rent_paying_accounts += 1;
total_accounts_stats.lamports_in_rent_paying_accounts += account.lamports();
if data_len == 0 {
total_accounts_stats.num_rent_paying_accounts_without_data += 1;
}
}
});
total_accounts_stats
}
}
/// Struct to collect stats when scanning all accounts in `get_total_accounts_stats()`
#[derive(Debug, Default, Copy, Clone)]
pub struct TotalAccountsStats {
/// Total number of accounts
pub num_accounts: usize,
/// Total data size of all accounts
pub data_len: usize,
/// Total number of executable accounts
pub num_executable_accounts: usize,
/// Total data size of executable accounts
pub executable_data_len: usize,
/// Total number of rent exempt accounts
pub num_rent_exempt_accounts: usize,
/// Total number of rent paying accounts
pub num_rent_paying_accounts: usize,
/// Total number of rent paying accounts without data
pub num_rent_paying_accounts_without_data: usize,
/// Total amount of lamports in rent paying accounts
pub lamports_in_rent_paying_accounts: u64,
}
impl Drop for Bank {
fn drop(&mut self) {
if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() {
drop_callback.callback(self);
} else {
// Default case
// 1. Tests
// 2. At startup when replaying blockstore and there's no
// AccountsBackgroundService to perform cleanups yet.
self.rc
.accounts
.purge_slot(self.slot(), self.bank_id(), false);
}
}
}
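/// Test helper: keeps registering synthetic ticks until one of them becomes the
/// bank's last blockhash (i.e. the slot's final tick was reached), then freezes
/// the bank.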
pub fn goto_end_of_slot(bank: &mut Bank) {
let mut tick_hash = bank.last_blockhash();
loop {
tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
bank.register_tick(&tick_hash);
if tick_hash == bank.last_blockhash() {
bank.freeze();
return;
}
}
}
pub fn is_simple_vote_transaction(transaction: &SanitizedTransaction) -> bool {
if transaction.message().instructions().len() == 1 {
let (program_pubkey, instruction) = transaction
.message()
.program_instructions_iter()
.next()
.unwrap();
if program_pubkey == &solana_vote_program::id() {
if let Ok(vote_instruction) = limited_deserialize::<VoteInstruction>(&instruction.data)
{
return matches!(
vote_instruction,
VoteInstruction::Vote(_)
| VoteInstruction::VoteSwitch(_, _)
| VoteInstruction::UpdateVoteState(_)
| VoteInstruction::UpdateVoteStateSwitch(_, _)
);
}
}
}
false
}
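// Sketch (illustrative): a transaction whose single instruction targets the vote
// program with Vote/VoteSwitch/UpdateVoteState/UpdateVoteStateSwitch data returns
// true above; any additional instruction, or a non-vote program id, yields false.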
#[cfg(test)]
pub(crate) mod tests {
#[allow(deprecated)]
use solana_sdk::sysvar::fees::Fees;
use {
super::*,
crate::{
accounts_background_service::{AbsRequestHandler, SendDroppedBankCallback},
accounts_db::DEFAULT_ACCOUNTS_SHRINK_RATIO,
accounts_index::{AccountIndex, AccountSecondaryIndexes, ScanError, ITER_BATCH_SIZE},
ancestors::Ancestors,
genesis_utils::{
activate_all_features, bootstrap_validator_stake_lamports,
create_genesis_config_with_leader, create_genesis_config_with_vote_accounts,
GenesisConfigInfo, ValidatorVoteKeypairs,
},
stake_delegations::StakeDelegations,
status_cache::MAX_CACHE_ENTRIES,
},
crossbeam_channel::{bounded, unbounded},
solana_program_runtime::invoke_context::InvokeContext,
solana_sdk::{
account::Account,
clock::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
compute_budget::ComputeBudgetInstruction,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
feature::Feature,
feature_set::reject_empty_instruction_without_program,
genesis_config::create_genesis_config,
hash,
instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError},
keyed_account::keyed_account_at_index,
message::{Message, MessageHeader},
nonce,
poh_config::PohConfig,
rent::Rent,
signature::{keypair_from_seed, Keypair, Signer},
stake::{
instruction as stake_instruction,
state::{Authorized, Delegation, Lockup, Stake},
},
system_instruction::{self, SystemError},
system_program,
sysvar::rewards::Rewards,
timing::duration_as_s,
},
solana_vote_program::{
vote_instruction,
vote_state::{
self, BlockTimestamp, Vote, VoteInit, VoteState, VoteStateVersions,
MAX_LOCKOUT_HISTORY,
},
},
std::{result, thread::Builder, time::Duration},
};
impl Bank {
fn cloned_stake_delegations(&self) -> StakeDelegations {
self.stakes_cache.stakes().stake_delegations().clone()
}
}
fn new_sanitized_message(
instructions: &[Instruction],
payer: Option<&Pubkey>,
) -> SanitizedMessage {
Message::new(instructions, payer).try_into().unwrap()
}
#[test]
fn test_nonce_info() {
let lamports_per_signature = 42;
let nonce_authority = keypair_from_seed(&[0; 32]).unwrap();
let nonce_address = nonce_authority.pubkey();
let from = keypair_from_seed(&[1; 32]).unwrap();
let from_address = from.pubkey();
let to_address = Pubkey::new_unique();
let nonce_account = AccountSharedData::new_data(
43,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::new(
Pubkey::default(),
Hash::new_unique(),
lamports_per_signature,
),
)),
&system_program::id(),
)
.unwrap();
let from_account = AccountSharedData::new(44, 0, &Pubkey::default());
let to_account = AccountSharedData::new(45, 0, &Pubkey::default());
let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default());
const TEST_RENT_DEBIT: u64 = 1;
let rent_collected_nonce_account = {
let mut account = nonce_account.clone();
account.set_lamports(nonce_account.lamports() - TEST_RENT_DEBIT);
account
};
let rent_collected_from_account = {
let mut account = from_account.clone();
account.set_lamports(from_account.lamports() - TEST_RENT_DEBIT);
account
};
let instructions = vec![
system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()),
system_instruction::transfer(&from_address, &to_address, 42),
];
// NoncePartial create + NonceInfo impl
let partial = NoncePartial::new(nonce_address, rent_collected_nonce_account.clone());
assert_eq!(*partial.address(), nonce_address);
assert_eq!(*partial.account(), rent_collected_nonce_account);
assert_eq!(
partial.lamports_per_signature(),
Some(lamports_per_signature)
);
assert_eq!(partial.fee_payer_account(), None);
// Add rent debits to ensure the rollback captures accounts without rent fees
let mut rent_debits = RentDebits::default();
rent_debits.insert(
&from_address,
TEST_RENT_DEBIT,
rent_collected_from_account.lamports(),
);
rent_debits.insert(
&nonce_address,
TEST_RENT_DEBIT,
rent_collected_nonce_account.lamports(),
);
// NonceFull create + NonceInfo impl
{
let message = new_sanitized_message(&instructions, Some(&from_address));
let accounts = [
(
*message.get_account_key(0).unwrap(),
rent_collected_from_account.clone(),
),
(
*message.get_account_key(1).unwrap(),
rent_collected_nonce_account.clone(),
),
(*message.get_account_key(2).unwrap(), to_account.clone()),
(
*message.get_account_key(3).unwrap(),
recent_blockhashes_sysvar_account.clone(),
),
];
let full = NonceFull::from_partial(partial.clone(), &message, &accounts, &rent_debits)
.unwrap();
assert_eq!(*full.address(), nonce_address);
assert_eq!(*full.account(), rent_collected_nonce_account);
assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature));
assert_eq!(
full.fee_payer_account(),
Some(&from_account),
"rent debit should be refunded in captured fee account"
);
}
// Nonce account is fee-payer
{
let message = new_sanitized_message(&instructions, Some(&nonce_address));
let accounts = [
(
*message.get_account_key(0).unwrap(),
rent_collected_nonce_account,
),
(
*message.get_account_key(1).unwrap(),
rent_collected_from_account,
),
(*message.get_account_key(2).unwrap(), to_account),
(
*message.get_account_key(3).unwrap(),
recent_blockhashes_sysvar_account,
),
];
let full = NonceFull::from_partial(partial.clone(), &message, &accounts, &rent_debits)
.unwrap();
assert_eq!(*full.address(), nonce_address);
assert_eq!(*full.account(), nonce_account);
assert_eq!(full.lamports_per_signature(), Some(lamports_per_signature));
assert_eq!(full.fee_payer_account(), None);
}
// NonceFull create, fee-payer not in account_keys fails
{
let message = new_sanitized_message(&instructions, Some(&nonce_address));
assert_eq!(
NonceFull::from_partial(partial, &message, &[], &RentDebits::default())
.unwrap_err(),
TransactionError::AccountNotFound,
);
}
}
#[test]
fn test_bank_unix_timestamp_from_genesis() {
let (genesis_config, _mint_keypair) = create_genesis_config(1);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(
genesis_config.creation_time,
bank.unix_timestamp_from_genesis()
);
let slots_per_sec = 1.0
/ (duration_as_s(&genesis_config.poh_config.target_tick_duration)
* genesis_config.ticks_per_slot as f32);
for _i in 0..slots_per_sec as usize + 1 {
bank = Arc::new(new_from_parent(&bank));
}
assert!(bank.unix_timestamp_from_genesis() - genesis_config.creation_time >= 1);
}
#[test]
#[allow(clippy::float_cmp)]
fn test_bank_new() {
let dummy_leader_pubkey = solana_sdk::pubkey::new_rand();
let dummy_leader_stake_lamports = bootstrap_validator_stake_lamports();
let mint_lamports = 10_000;
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
voting_keypair,
..
} = create_genesis_config_with_leader(
mint_lamports,
&dummy_leader_pubkey,
dummy_leader_stake_lamports,
);
genesis_config.rent = Rent {
lamports_per_byte_year: 5,
exemption_threshold: 1.2,
burn_percent: 5,
};
let bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), mint_lamports);
assert_eq!(
bank.get_balance(&voting_keypair.pubkey()),
dummy_leader_stake_lamports /* the full bootstrap stake goes to the vote account associated with dummy_leader_pubkey */
);
let rent_account = bank.get_account(&sysvar::rent::id()).unwrap();
let rent = from_account::<sysvar::rent::Rent, _>(&rent_account).unwrap();
assert_eq!(rent.burn_percent, 5);
assert_eq!(rent.exemption_threshold, 1.2);
assert_eq!(rent.lamports_per_byte_year, 5);
}
#[test]
fn test_bank_block_height() {
let (genesis_config, _mint_keypair) = create_genesis_config(1);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank0.block_height(), 0);
let bank1 = Arc::new(new_from_parent(&bank0));
assert_eq!(bank1.block_height(), 1);
}
#[test]
fn test_bank_update_epoch_stakes() {
impl Bank {
fn epoch_stake_keys(&self) -> Vec<Epoch> {
let mut keys: Vec<Epoch> = self.epoch_stakes.keys().copied().collect();
keys.sort_unstable();
keys
}
fn epoch_stake_key_info(&self) -> (Epoch, Epoch, usize) {
let mut keys: Vec<Epoch> = self.epoch_stakes.keys().copied().collect();
keys.sort_unstable();
(*keys.first().unwrap(), *keys.last().unwrap(), keys.len())
}
}
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let mut bank = Bank::new_for_tests(&genesis_config);
let initial_epochs = bank.epoch_stake_keys();
assert_eq!(initial_epochs, vec![0, 1]);
for existing_epoch in &initial_epochs {
bank.update_epoch_stakes(*existing_epoch);
assert_eq!(bank.epoch_stake_keys(), initial_epochs);
}
for epoch in (initial_epochs.len() as Epoch)..MAX_LEADER_SCHEDULE_STAKES {
bank.update_epoch_stakes(epoch);
assert_eq!(bank.epoch_stakes.len() as Epoch, epoch + 1);
}
assert_eq!(
bank.epoch_stake_key_info(),
(
0,
MAX_LEADER_SCHEDULE_STAKES - 1,
MAX_LEADER_SCHEDULE_STAKES as usize
)
);
bank.update_epoch_stakes(MAX_LEADER_SCHEDULE_STAKES);
assert_eq!(
bank.epoch_stake_key_info(),
(
0,
MAX_LEADER_SCHEDULE_STAKES,
MAX_LEADER_SCHEDULE_STAKES as usize + 1
)
);
bank.update_epoch_stakes(MAX_LEADER_SCHEDULE_STAKES + 1);
assert_eq!(
bank.epoch_stake_key_info(),
(
1,
MAX_LEADER_SCHEDULE_STAKES + 1,
MAX_LEADER_SCHEDULE_STAKES as usize + 1
)
);
}
#[test]
fn test_bank_capitalization() {
let bank0 = Arc::new(Bank::new_for_tests(&GenesisConfig {
accounts: (0..42)
.map(|_| {
(
solana_sdk::pubkey::new_rand(),
Account::new(42, 0, &Pubkey::default()),
)
})
.collect(),
cluster_type: ClusterType::MainnetBeta,
..GenesisConfig::default()
}));
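// The genesis bank also creates sysvar and builtin program accounts; the
// deltas below are the lamports those accounts add to capitalization (values
// assumed from the default test setup).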
let sysvar_and_builtin_program_delta0 = 11;
assert_eq!(
bank0.capitalization(),
42 * 42 + sysvar_and_builtin_program_delta0
);
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
let sysvar_and_builtin_program_delta1 = 2;
assert_eq!(
bank1.capitalization(),
42 * 42 + sysvar_and_builtin_program_delta0 + sysvar_and_builtin_program_delta1,
);
}
#[test]
fn test_credit_debit_rent_no_side_effect_on_hash() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(10);
let keypair1: Keypair = Keypair::new();
let keypair2: Keypair = Keypair::new();
let keypair3: Keypair = Keypair::new();
let keypair4: Keypair = Keypair::new();
// Transaction between these two keypairs will fail
let keypair5: Keypair = Keypair::new();
let keypair6: Keypair = Keypair::new();
genesis_config.rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 21.0,
burn_percent: 10,
};
let root_bank = Arc::new(Bank::new_for_tests(&genesis_config));
let bank = Bank::new_from_parent(
&root_bank,
&Pubkey::default(),
years_as_slots(
2.0,
&genesis_config.poh_config.target_tick_duration,
genesis_config.ticks_per_slot,
) as u64,
);
let root_bank_2 = Arc::new(Bank::new_for_tests(&genesis_config));
let bank_with_success_txs = Bank::new_from_parent(
&root_bank_2,
&Pubkey::default(),
years_as_slots(
2.0,
&genesis_config.poh_config.target_tick_duration,
genesis_config.ticks_per_slot,
) as u64,
);
assert_eq!(bank.last_blockhash(), genesis_config.hash());
// Initialize credit-debit and credit only accounts
let account1 = AccountSharedData::new(264, 0, &Pubkey::default());
let account2 = AccountSharedData::new(264, 1, &Pubkey::default());
let account3 = AccountSharedData::new(264, 0, &Pubkey::default());
let account4 = AccountSharedData::new(264, 1, &Pubkey::default());
let account5 = AccountSharedData::new(10, 0, &Pubkey::default());
let account6 = AccountSharedData::new(10, 1, &Pubkey::default());
bank.store_account(&keypair1.pubkey(), &account1);
bank.store_account(&keypair2.pubkey(), &account2);
bank.store_account(&keypair3.pubkey(), &account3);
bank.store_account(&keypair4.pubkey(), &account4);
bank.store_account(&keypair5.pubkey(), &account5);
bank.store_account(&keypair6.pubkey(), &account6);
bank_with_success_txs.store_account(&keypair1.pubkey(), &account1);
bank_with_success_txs.store_account(&keypair2.pubkey(), &account2);
bank_with_success_txs.store_account(&keypair3.pubkey(), &account3);
bank_with_success_txs.store_account(&keypair4.pubkey(), &account4);
bank_with_success_txs.store_account(&keypair5.pubkey(), &account5);
bank_with_success_txs.store_account(&keypair6.pubkey(), &account6);
// Make builtin instruction loader rent exempt
let system_program_id = system_program::id();
let mut system_program_account = bank.get_account(&system_program_id).unwrap();
system_program_account.set_lamports(
bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()),
);
bank.store_account(&system_program_id, &system_program_account);
bank_with_success_txs.store_account(&system_program_id, &system_program_account);
let t1 =
system_transaction::transfer(&keypair1, &keypair2.pubkey(), 1, genesis_config.hash());
let t2 =
system_transaction::transfer(&keypair3, &keypair4.pubkey(), 1, genesis_config.hash());
let t3 =
system_transaction::transfer(&keypair5, &keypair6.pubkey(), 1, genesis_config.hash());
let txs = vec![t1.clone(), t2.clone(), t3];
let res = bank.process_transactions(txs.iter());
assert_eq!(res.len(), 3);
assert_eq!(res[0], Ok(()));
assert_eq!(res[1], Ok(()));
assert_eq!(res[2], Err(TransactionError::AccountNotFound));
bank.freeze();
let rwlockguard_bank_hash = bank.hash.read().unwrap();
let bank_hash = rwlockguard_bank_hash.as_ref();
let txs = vec![t2, t1];
let res = bank_with_success_txs.process_transactions(txs.iter());
assert_eq!(res.len(), 2);
assert_eq!(res[0], Ok(()));
assert_eq!(res[1], Ok(()));
bank_with_success_txs.freeze();
let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap();
let bank_with_success_txs_hash = rwlockguard_bank_with_success_txs_hash.as_ref();
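// The failed rent-paying transaction must have no side effect on the bank
// hash: the bank that also processed the failing transfer hashes identically
// to the bank that processed only the successful transfers.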
assert_eq!(bank_with_success_txs_hash, bank_hash);
}
#[derive(Serialize, Deserialize)]
enum MockInstruction {
Deduction,
}
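// Mock builtin: moves one lamport from the third instruction account to the
// second, exercising credit/debit paths without touching the fee payer.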
fn mock_process_instruction(
first_instruction_account: usize,
data: &[u8],
invoke_context: &mut InvokeContext,
) -> result::Result<(), InstructionError> {
let keyed_accounts = invoke_context.get_keyed_accounts()?;
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockInstruction::Deduction => {
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?
.account
.borrow_mut()
.checked_add_lamports(1)?;
keyed_account_at_index(keyed_accounts, first_instruction_account + 2)?
.account
.borrow_mut()
.checked_sub_lamports(1)?;
Ok(())
}
}
} else {
Err(InstructionError::InvalidInstructionData)
}
}
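// Builds a transaction that invokes the mock program with three writable
// signer accounts plus one read-only account.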
fn create_mock_transaction(
payer: &Keypair,
keypair1: &Keypair,
keypair2: &Keypair,
read_only_keypair: &Keypair,
mock_program_id: Pubkey,
recent_blockhash: Hash,
) -> Transaction {
let account_metas = vec![
AccountMeta::new(payer.pubkey(), true),
AccountMeta::new(keypair1.pubkey(), true),
AccountMeta::new(keypair2.pubkey(), true),
AccountMeta::new_readonly(read_only_keypair.pubkey(), false),
];
let deduct_instruction = Instruction::new_with_bincode(
mock_program_id,
&MockInstruction::Deduction,
account_metas,
);
Transaction::new_signed_with_payer(
&[deduct_instruction],
Some(&payer.pubkey()),
&[payer, keypair1, keypair2],
recent_blockhash,
)
}
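// Seeds the bank with accounts whose balances are tuned around the generic
// rent due, so the rent assertions in `test_rent_complex` come out exact.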
fn store_accounts_for_rent_test(
bank: &Bank,
keypairs: &mut Vec<Keypair>,
mock_program_id: Pubkey,
generic_rent_due_for_system_account: u64,
) {
let mut account_pairs: Vec<TransactionAccount> = Vec::with_capacity(keypairs.len() - 1);
account_pairs.push((
keypairs[0].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 2,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[1].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 2,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[2].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 2,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[3].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 2,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[4].pubkey(),
AccountSharedData::new(10, 0, &Pubkey::default()),
));
account_pairs.push((
keypairs[5].pubkey(),
AccountSharedData::new(10, 0, &Pubkey::default()),
));
account_pairs.push((
keypairs[6].pubkey(),
AccountSharedData::new(
(2 * generic_rent_due_for_system_account) + 24,
0,
&Pubkey::default(),
),
));
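// keypairs[7] is intentionally not funded here; it only receives lamports via
// the transfer issued by the test.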
account_pairs.push((
keypairs[8].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 2 + 929,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[9].pubkey(),
AccountSharedData::new(10, 0, &Pubkey::default()),
));
// Feeding to the mock program to test rent behavior for read-only accounts
account_pairs.push((
keypairs[10].pubkey(),
AccountSharedData::new(
generic_rent_due_for_system_account + 3,
0,
&Pubkey::default(),
),
));
account_pairs.push((
keypairs[11].pubkey(),
AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id),
));
account_pairs.push((
keypairs[12].pubkey(),
AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id),
));
account_pairs.push((
keypairs[13].pubkey(),
AccountSharedData::new(14, 22, &mock_program_id),
));
for account_pair in account_pairs.iter() {
bank.store_account(&account_pair.0, &account_pair.1);
}
}
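// Creates a child bank two "years" worth of slots ahead of the root so rent
// accrues, and installs the mock program as a builtin.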
fn create_child_bank_for_rent_test(
root_bank: &Arc<Bank>,
genesis_config: &GenesisConfig,
mock_program_id: Pubkey,
) -> Bank {
let mut bank = Bank::new_from_parent(
root_bank,
&Pubkey::default(),
years_as_slots(
2.0,
&genesis_config.poh_config.target_tick_duration,
genesis_config.ticks_per_slot,
) as u64,
);
bank.rent_collector.slots_per_year = 421_812.0;
bank.add_builtin("mock_program", &mock_program_id, mock_process_instruction);
bank
}
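// Runs `updater`, asserts the before/after capitalization via `asserter`, and
// verifies the tracked capitalization still matches a full recalculation.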
fn assert_capitalization_diff(bank: &Bank, updater: impl Fn(), asserter: impl Fn(u64, u64)) {
let old = bank.capitalization();
updater();
let new = bank.capitalization();
asserter(old, new);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
}
fn assert_capitalization_diff_with_new_bank(
bank: &Bank,
updater: impl Fn() -> Bank,
asserter: impl Fn(u64, u64),
) -> Bank {
let old = bank.capitalization();
let bank = updater();
let new = bank.capitalization();
asserter(old, new);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
bank
}
#[test]
fn test_store_account_and_update_capitalization_missing() {
let (genesis_config, _mint_keypair) = create_genesis_config(0);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = solana_sdk::pubkey::new_rand();
let some_lamports = 400;
let account = AccountSharedData::new(some_lamports, 0, &system_program::id());
assert_capitalization_diff(
&bank,
|| bank.store_account_and_update_capitalization(&pubkey, &account),
|old, new| assert_eq!(old + some_lamports, new),
);
assert_eq!(account, bank.get_account(&pubkey).unwrap());
}
#[test]
fn test_store_account_and_update_capitalization_increased() {
let old_lamports = 400;
let (genesis_config, mint_keypair) = create_genesis_config(old_lamports);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = mint_keypair.pubkey();
let new_lamports = 500;
let account = AccountSharedData::new(new_lamports, 0, &system_program::id());
assert_capitalization_diff(
&bank,
|| bank.store_account_and_update_capitalization(&pubkey, &account),
|old, new| assert_eq!(old + 100, new),
);
assert_eq!(account, bank.get_account(&pubkey).unwrap());
}
#[test]
fn test_store_account_and_update_capitalization_decreased() {
let old_lamports = 400;
let (genesis_config, mint_keypair) = create_genesis_config(old_lamports);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = mint_keypair.pubkey();
let new_lamports = 100;
let account = AccountSharedData::new(new_lamports, 0, &system_program::id());
assert_capitalization_diff(
&bank,
|| bank.store_account_and_update_capitalization(&pubkey, &account),
|old, new| assert_eq!(old - 300, new),
);
assert_eq!(account, bank.get_account(&pubkey).unwrap());
}
#[test]
fn test_store_account_and_update_capitalization_unchanged() {
let lamports = 400;
let (genesis_config, mint_keypair) = create_genesis_config(lamports);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = mint_keypair.pubkey();
let account = AccountSharedData::new(lamports, 1, &system_program::id());
assert_capitalization_diff(
&bank,
|| bank.store_account_and_update_capitalization(&pubkey, &account),
|old, new| assert_eq!(old, new),
);
assert_eq!(account, bank.get_account(&pubkey).unwrap());
}
#[test]
fn test_rent_distribution() {
solana_logger::setup();
let bootstrap_validator_pubkey = solana_sdk::pubkey::new_rand();
let bootstrap_validator_stake_lamports = 30;
let mut genesis_config = create_genesis_config_with_leader(
10,
&bootstrap_validator_pubkey,
bootstrap_validator_stake_lamports,
)
.genesis_config;
genesis_config.epoch_schedule = EpochSchedule::custom(
MINIMUM_SLOTS_PER_EPOCH,
genesis_config.epoch_schedule.leader_schedule_slot_offset,
false,
);
genesis_config.rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 2.0,
burn_percent: 10,
};
let rent = Rent::free();
let validator_1_pubkey = solana_sdk::pubkey::new_rand();
let validator_1_stake_lamports = 20;
let validator_1_staking_keypair = Keypair::new();
let validator_1_voting_keypair = Keypair::new();
let validator_1_vote_account = vote_state::create_account(
&validator_1_voting_keypair.pubkey(),
&validator_1_pubkey,
0,
validator_1_stake_lamports,
);
let validator_1_stake_account = stake_state::create_account(
&validator_1_staking_keypair.pubkey(),
&validator_1_voting_keypair.pubkey(),
&validator_1_vote_account,
&rent,
validator_1_stake_lamports,
);
genesis_config.accounts.insert(
validator_1_pubkey,
Account::new(42, 0, &system_program::id()),
);
genesis_config.accounts.insert(
validator_1_staking_keypair.pubkey(),
Account::from(validator_1_stake_account),
);
genesis_config.accounts.insert(
validator_1_voting_keypair.pubkey(),
Account::from(validator_1_vote_account),
);
let validator_2_pubkey = solana_sdk::pubkey::new_rand();
let validator_2_stake_lamports = 20;
let validator_2_staking_keypair = Keypair::new();
let validator_2_voting_keypair = Keypair::new();
let validator_2_vote_account = vote_state::create_account(
&validator_2_voting_keypair.pubkey(),
&validator_2_pubkey,
0,
validator_2_stake_lamports,
);
let validator_2_stake_account = stake_state::create_account(
&validator_2_staking_keypair.pubkey(),
&validator_2_voting_keypair.pubkey(),
&validator_2_vote_account,
&rent,
validator_2_stake_lamports,
);
genesis_config.accounts.insert(
validator_2_pubkey,
Account::new(42, 0, &system_program::id()),
);
genesis_config.accounts.insert(
validator_2_staking_keypair.pubkey(),
Account::from(validator_2_stake_account),
);
genesis_config.accounts.insert(
validator_2_voting_keypair.pubkey(),
Account::from(validator_2_vote_account),
);
let validator_3_pubkey = solana_sdk::pubkey::new_rand();
let validator_3_stake_lamports = 30;
let validator_3_staking_keypair = Keypair::new();
let validator_3_voting_keypair = Keypair::new();
let validator_3_vote_account = vote_state::create_account(
&validator_3_voting_keypair.pubkey(),
&validator_3_pubkey,
0,
validator_3_stake_lamports,
);
let validator_3_stake_account = stake_state::create_account(
&validator_3_staking_keypair.pubkey(),
&validator_3_voting_keypair.pubkey(),
&validator_3_vote_account,
&rent,
validator_3_stake_lamports,
);
genesis_config.accounts.insert(
validator_3_pubkey,
Account::new(42, 0, &system_program::id()),
);
genesis_config.accounts.insert(
validator_3_staking_keypair.pubkey(),
Account::from(validator_3_stake_account),
);
genesis_config.accounts.insert(
validator_3_voting_keypair.pubkey(),
Account::from(validator_3_vote_account),
);
genesis_config.rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 10.0,
burn_percent: 10,
};
let mut bank = Bank::new_for_tests(&genesis_config);
// Enable rent collection
bank.rent_collector.epoch = 5;
bank.rent_collector.slots_per_year = 192.0;
let payer = Keypair::new();
let payer_account = AccountSharedData::new(400, 0, &system_program::id());
bank.store_account_and_update_capitalization(&payer.pubkey(), &payer_account);
let payee = Keypair::new();
let payee_account = AccountSharedData::new(70, 1, &system_program::id());
bank.store_account_and_update_capitalization(&payee.pubkey(), &payee_account);
let bootstrap_validator_initial_balance = bank.get_balance(&bootstrap_validator_pubkey);
let tx = system_transaction::transfer(&payer, &payee.pubkey(), 180, genesis_config.hash());
let result = bank.process_transaction(&tx);
assert_eq!(result, Ok(()));
let mut total_rent_deducted = 0;
// 400 - 128(Rent) - 180(Transfer)
assert_eq!(bank.get_balance(&payer.pubkey()), 92);
total_rent_deducted += 128;
// 70 - 70(Rent) + 180(Transfer) - 21(Rent)
assert_eq!(bank.get_balance(&payee.pubkey()), 159);
total_rent_deducted += 70 + 21;
let previous_capitalization = bank.capitalization.load(Relaxed);
bank.freeze();
assert_eq!(bank.collected_rent.load(Relaxed), total_rent_deducted);
let burned_portion =
total_rent_deducted * u64::from(bank.rent_collector.rent.burn_percent) / 100;
let rent_to_be_distributed = total_rent_deducted - burned_portion;
let bootstrap_validator_portion =
((bootstrap_validator_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64
+ 1; // Leftover lamport
assert_eq!(
bank.get_balance(&bootstrap_validator_pubkey),
bootstrap_validator_portion + bootstrap_validator_initial_balance
);
// Since validator 1 and validator 2 have the same (smallest) stake, the
// remainder lamport goes to whichever has the larger pubkey.
let tweak_1 = if validator_1_pubkey > validator_2_pubkey {
1
} else {
0
};
let validator_1_portion =
((validator_1_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_1;
assert_eq!(
bank.get_balance(&validator_1_pubkey),
validator_1_portion + 42 - tweak_1,
);
// Since validator 1 and validator 2 have the same (smallest) stake, the
// remainder lamport goes to whichever has the larger pubkey.
let tweak_2 = if validator_2_pubkey > validator_1_pubkey {
1
} else {
0
};
let validator_2_portion =
((validator_2_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + tweak_2;
assert_eq!(
bank.get_balance(&validator_2_pubkey),
validator_2_portion + 42 - tweak_2,
);
let validator_3_portion =
((validator_3_stake_lamports * rent_to_be_distributed) as f64 / 100.0) as u64 + 1;
assert_eq!(
bank.get_balance(&validator_3_pubkey),
validator_3_portion + 42
);
let current_capitalization = bank.capitalization.load(Relaxed);
// only slot history is newly created
let sysvar_and_builtin_program_delta =
min_rent_excempt_balance_for_sysvars(&bank, &[sysvar::slot_history::id()]);
assert_eq!(
previous_capitalization - (current_capitalization - sysvar_and_builtin_program_delta),
burned_portion
);
assert!(bank.calculate_and_verify_capitalization(true));
assert_eq!(
rent_to_be_distributed,
bank.rewards
.read()
.unwrap()
.iter()
.map(|(address, reward)| {
if reward.lamports > 0 {
assert_eq!(reward.reward_type, RewardType::Rent);
if *address == validator_2_pubkey {
assert_eq!(reward.post_balance, validator_2_portion + 42 - tweak_2);
} else if *address == validator_3_pubkey {
assert_eq!(reward.post_balance, validator_3_portion + 42);
}
reward.lamports as u64
} else {
0
}
})
.sum::<u64>()
);
}
#[test]
fn test_distribute_rent_to_validators_overflow() {
solana_logger::setup();
// These values are taken from the real cluster (testnet)
const RENT_TO_BE_DISTRIBUTED: u64 = 120_525;
const VALIDATOR_STAKE: u64 = 374_999_998_287_840;
let validator_pubkey = solana_sdk::pubkey::new_rand();
let mut genesis_config =
create_genesis_config_with_leader(10, &validator_pubkey, VALIDATOR_STAKE)
.genesis_config;
let bank = Bank::new_for_tests(&genesis_config);
let old_validator_lamports = bank.get_balance(&validator_pubkey);
bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED);
let new_validator_lamports = bank.get_balance(&validator_pubkey);
assert_eq!(
new_validator_lamports,
old_validator_lamports + RENT_TO_BE_DISTRIBUTED
);
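// Remove the no_overflow_rent_distribution feature from genesis to exercise
// the legacy distribution path, which may overflow (and hence panic in debug
// builds).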
genesis_config
.accounts
.remove(&feature_set::no_overflow_rent_distribution::id())
.unwrap();
let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config));
let old_validator_lamports = bank.get_balance(&validator_pubkey);
let new_validator_lamports = std::panic::catch_unwind(|| {
bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED);
bank.get_balance(&validator_pubkey)
});
if let Ok(new_validator_lamports) = new_validator_lamports {
info!("asserting overflowing incorrect rent distribution");
assert_ne!(
new_validator_lamports,
old_validator_lamports + RENT_TO_BE_DISTRIBUTED
);
} else {
info!("NOT-asserting overflowing incorrect rent distribution");
}
}
#[test]
fn test_rent_exempt_executable_account() {
let (mut genesis_config, mint_keypair) = create_genesis_config(100_000);
genesis_config.rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 1000.0,
burn_percent: 10,
};
let root_bank = Arc::new(Bank::new_for_tests(&genesis_config));
let bank = create_child_bank_for_rent_test(
&root_bank,
&genesis_config,
solana_sdk::pubkey::new_rand(),
);
let account_pubkey = solana_sdk::pubkey::new_rand();
let account_balance = 1;
let mut account =
AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand());
account.set_executable(true);
bank.store_account(&account_pubkey, &account);
let transfer_lamports = 1;
let tx = system_transaction::transfer(
&mint_keypair,
&account_pubkey,
transfer_lamports,
genesis_config.hash(),
);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InvalidWritableAccount)
);
assert_eq!(bank.get_balance(&account_pubkey), account_balance);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_rent_complex() {
solana_logger::setup();
let mock_program_id = Pubkey::new(&[2u8; 32]);
let (mut genesis_config, _mint_keypair) = create_genesis_config(10);
let mut keypairs: Vec<Keypair> = Vec::with_capacity(14);
for _i in 0..14 {
keypairs.push(Keypair::new());
}
genesis_config.rent = Rent {
lamports_per_byte_year: 1,
exemption_threshold: 1000.0,
burn_percent: 10,
};
let root_bank = Bank::new_for_tests(&genesis_config);
// until we completely transition to eager rent collection,
// we must ensure lazy rent collection doesn't get broken!
root_bank.restore_old_behavior_for_fragile_tests();
let root_bank = Arc::new(root_bank);
let bank = create_child_bank_for_rent_test(&root_bank, &genesis_config, mock_program_id);
assert_eq!(bank.last_blockhash(), genesis_config.hash());
let slots_elapsed: u64 = (0..=bank.epoch)
.map(|epoch| {
bank.rent_collector
.epoch_schedule
.get_slots_in_epoch(epoch + 1)
})
.sum();
let (generic_rent_due_for_system_account, _) = bank.rent_collector.rent.due(
bank.get_minimum_balance_for_rent_exemption(0) - 1,
0,
slots_elapsed as f64 / bank.rent_collector.slots_per_year,
);
store_accounts_for_rent_test(
&bank,
&mut keypairs,
mock_program_id,
generic_rent_due_for_system_account,
);
let magic_rent_number = 131; // yuck, derive this value programmatically one day
let t1 = system_transaction::transfer(
&keypairs[0],
&keypairs[1].pubkey(),
1,
genesis_config.hash(),
);
let t2 = system_transaction::transfer(
&keypairs[2],
&keypairs[3].pubkey(),
1,
genesis_config.hash(),
);
let t3 = system_transaction::transfer(
&keypairs[4],
&keypairs[5].pubkey(),
1,
genesis_config.hash(),
);
let t4 = system_transaction::transfer(
&keypairs[6],
&keypairs[7].pubkey(),
generic_rent_due_for_system_account + 1,
genesis_config.hash(),
);
let t5 = system_transaction::transfer(
&keypairs[8],
&keypairs[9].pubkey(),
929,
genesis_config.hash(),
);
let t6 = create_mock_transaction(
&keypairs[10],
&keypairs[11],
&keypairs[12],
&keypairs[13],
mock_program_id,
genesis_config.hash(),
);
let txs = vec![t6, t5, t1, t2, t3, t4];
let res = bank.process_transactions(txs.iter());
assert_eq!(res.len(), 6);
assert_eq!(res[0], Ok(()));
assert_eq!(res[1], Ok(()));
assert_eq!(res[2], Ok(()));
assert_eq!(res[3], Ok(()));
assert_eq!(res[4], Err(TransactionError::AccountNotFound));
assert_eq!(res[5], Ok(()));
bank.freeze();
let mut rent_collected = 0;
// 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer)
assert_eq!(bank.get_balance(&keypairs[0].pubkey()), 1);
rent_collected += generic_rent_due_for_system_account;
// 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer)
assert_eq!(bank.get_balance(&keypairs[1].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
// 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer)
assert_eq!(bank.get_balance(&keypairs[2].pubkey()), 1);
rent_collected += generic_rent_due_for_system_account;
// 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer)
assert_eq!(bank.get_balance(&keypairs[3].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
// No rent deducted
assert_eq!(bank.get_balance(&keypairs[4].pubkey()), 10);
assert_eq!(bank.get_balance(&keypairs[5].pubkey()), 10);
// 98004 - generic_rent_due_for_system_account(Rent) - 48991(transfer)
assert_eq!(bank.get_balance(&keypairs[6].pubkey()), 23);
rent_collected += generic_rent_due_for_system_account;
// 0 + 48990(transfer) - magic_rent_number(Rent)
assert_eq!(
bank.get_balance(&keypairs[7].pubkey()),
generic_rent_due_for_system_account + 1 - magic_rent_number
);
// Rent was deducted on the store side, and the account's rent epoch should
// have been advanced past the current epoch.
let account8 = bank.get_account(&keypairs[7].pubkey()).unwrap();
assert_eq!(account8.rent_epoch(), bank.epoch + 1);
rent_collected += magic_rent_number;
// 49921 - generic_rent_due_for_system_account(Rent) - 929(Transfer)
assert_eq!(bank.get_balance(&keypairs[8].pubkey()), 2);
rent_collected += generic_rent_due_for_system_account;
let account10 = bank.get_account(&keypairs[9].pubkey()).unwrap();
// The account was overwritten at load time because it lacked the balance to
// pay rent; then, at store time, `magic_rent_number` rent was deducted for
// the current epoch, now that the account holds a balance
assert_eq!(account10.rent_epoch(), bank.epoch + 1);
// account data is blank now
assert_eq!(account10.data().len(), 0);
// 10 - 10(Rent) + 929(Transfer) - magic_rent_number(Rent)
assert_eq!(account10.lamports(), 929 - magic_rent_number);
rent_collected += magic_rent_number + 10;
// 48993 - generic_rent_due_for_system_account(Rent)
assert_eq!(bank.get_balance(&keypairs[10].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
// 48993 - generic_rent_due_for_system_account(Rent) + 1(Addition by program)
assert_eq!(bank.get_balance(&keypairs[11].pubkey()), 4);
rent_collected += generic_rent_due_for_system_account;
// 48993 - generic_rent_due_for_system_account(Rent) - 1(Deduction by program)
assert_eq!(bank.get_balance(&keypairs[12].pubkey()), 2);
rent_collected += generic_rent_due_for_system_account;
// No rent for read-only account
assert_eq!(bank.get_balance(&keypairs[13].pubkey()), 14);
// Bank's collected rent should be sum of rent collected from all accounts
assert_eq!(bank.collected_rent.load(Relaxed), rent_collected);
}
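// Wraps `rent_collection_partitions`, cross-checking the last partition
// against the variable-cycle computation whenever the bank immediately
// follows its parent slot.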
fn test_rent_collection_partitions(bank: &Bank) -> Vec<Partition> {
let partitions = bank.rent_collection_partitions();
let slot = bank.slot();
if slot.saturating_sub(1) == bank.parent_slot() {
let partition = Bank::variable_cycle_partition_from_previous_slot(
bank.epoch_schedule(),
bank.slot(),
);
assert_eq!(
partitions.last().unwrap(),
&partition,
"slot: {}, slots per epoch: {}, partitions: {:?}",
bank.slot(),
bank.epoch_schedule().slots_per_epoch,
partitions
);
}
partitions
}
#[test]
fn test_rent_eager_across_epoch_without_gap() {
let (genesis_config, _mint_keypair) = create_genesis_config(1);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]);
for _ in 2..32 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.rent_collection_partitions(), vec![(30, 31, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 64)]);
}
#[test]
fn test_rent_eager_across_epoch_without_gap_mnb() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
genesis_config.cluster_type = ClusterType::MainnetBeta;
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 1, 32)]);
for _ in 2..32 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(test_rent_collection_partitions(&bank), vec![(30, 31, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 64)]);
}
#[test]
fn test_rent_eager_across_epoch_with_full_gap() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
activate_all_features(&mut genesis_config);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]);
for _ in 2..15 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.rent_collection_partitions(), vec![(13, 14, 32)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 49));
assert_eq!(
bank.rent_collection_partitions(),
vec![(14, 31, 32), (0, 0, 64), (0, 17, 64)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(17, 18, 64)]);
}
#[test]
fn test_rent_eager_across_epoch_with_half_gap() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
activate_all_features(&mut genesis_config);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 32)]);
for _ in 2..15 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.rent_collection_partitions(), vec![(13, 14, 32)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 32));
assert_eq!(
bank.rent_collection_partitions(),
vec![(14, 31, 32), (0, 0, 64)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 64)]);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_rent_eager_across_epoch_without_gap_under_multi_epoch_cycle() {
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432_000);
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432_000)]);
for _ in 2..32 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 31));
assert_eq!(bank.rent_collection_partitions(), vec![(30, 31, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(31, 32, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(32, 33, 432_000)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1000));
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1001));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (31, 9));
assert_eq!(
bank.rent_collection_partitions(),
vec![(1000, 1001, 432_000)]
);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431_998));
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431_999));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13499, 31));
assert_eq!(
bank.rent_collection_partitions(),
vec![(431_998, 431_999, 432_000)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432_000)]);
}
#[test]
fn test_rent_eager_across_epoch_with_gap_under_multi_epoch_cycle() {
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432_000);
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432_000)]);
for _ in 2..19 {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 18));
assert_eq!(bank.rent_collection_partitions(), vec![(17, 18, 432_000)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 44));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 12));
assert_eq!(
bank.rent_collection_partitions(),
vec![(18, 31, 432_000), (31, 31, 432_000), (31, 44, 432_000)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1, 13));
assert_eq!(bank.rent_collection_partitions(), vec![(44, 45, 432_000)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 431_993));
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 432_011));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (13500, 11));
assert_eq!(
bank.rent_collection_partitions(),
vec![
(431_993, 431_999, 432_000),
(0, 0, 432_000),
(0, 11, 432_000)
]
);
}
#[test]
fn test_rent_eager_with_warmup_epochs_under_multi_epoch_cycle() {
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
genesis_config.cluster_type = ClusterType::MainnetBeta;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432_000);
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.first_normal_epoch(), 3);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222));
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127));
assert_eq!(bank.rent_collection_partitions(), vec![(126, 127, 128)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 431_872)]);
assert_eq!(431_872 % bank.get_slots_in_epoch(bank.epoch()), 0);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 431_872)]);
bank = Arc::new(Bank::new_from_parent(
&bank,
&Pubkey::default(),
431_872 + 223 - 1,
));
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1689, 255));
assert_eq!(
bank.rent_collection_partitions(),
vec![(431_870, 431_871, 431_872)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (1690, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 431_872)]);
}
#[test]
fn test_rent_eager_under_fixed_cycle_for_development() {
solana_logger::setup();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64 * 8;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, true);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 32);
assert_eq!(bank.first_normal_epoch(), 3);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (0, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432_000)]);
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 222));
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 128);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (2, 127));
assert_eq!(bank.rent_collection_partitions(), vec![(222, 223, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 0));
assert_eq!(bank.rent_collection_partitions(), vec![(223, 224, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_slots_in_epoch(bank.epoch()), 256);
assert_eq!(bank.get_epoch_and_slot_index(bank.slot()), (3, 1));
assert_eq!(bank.rent_collection_partitions(), vec![(224, 225, 432_000)]);
bank = Arc::new(Bank::new_from_parent(
&bank,
&Pubkey::default(),
432_000 - 2,
));
bank = Arc::new(new_from_parent(&bank));
assert_eq!(
bank.rent_collection_partitions(),
vec![(431_998, 431_999, 432_000)]
);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 432_000)]);
bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.rent_collection_partitions(), vec![(0, 1, 432_000)]);
bank = Arc::new(Bank::new_from_parent(
&bank,
&Pubkey::default(),
864_000 - 20,
));
bank = Arc::new(Bank::new_from_parent(
&bank,
&Pubkey::default(),
864_000 + 39,
));
assert_eq!(
bank.rent_collection_partitions(),
vec![
(431_980, 431_999, 432_000),
(0, 0, 432_000),
(0, 39, 432_000)
]
);
}
#[test]
fn test_rent_eager_pubkey_range_minimal() {
let range = Bank::pubkey_range_from_partition((0, 0, 1));
assert_eq!(
range,
Pubkey::new_from_array([0x00; 32])..=Pubkey::new_from_array([0xff; 32])
);
}
#[test]
fn test_rent_eager_pubkey_range_maximum() {
let max = !0;
let range = Bank::pubkey_range_from_partition((0, 0, max));
assert_eq!(
range,
Pubkey::new_from_array([0x00; 32])
..=Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let range = Bank::pubkey_range_from_partition((0, 1, max));
assert_eq!(
range,
Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
])
..=Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let range = Bank::pubkey_range_from_partition((max - 3, max - 2, max));
assert_eq!(
range,
Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let range = Bank::pubkey_range_from_partition((max - 2, max - 1, max));
assert_eq!(
range,
Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
fn should_cause_overflow(partition_count: u64) -> bool {
// Check `partition_width = (u64::max_value() + 1) / partition_count` is exact and
// does not have a remainder.
// This way, `partition_width * partition_count == (u64::max_value() + 1)`,
// so the test actually tests for overflow
(u64::max_value() - partition_count + 1) % partition_count == 0
}
let max_exact = 64;
// Make sure `max_exact` divides the u64 key space evenly when calculating the partition width
assert!(should_cause_overflow(max_exact));
// Make sure `max_inexact` doesn't divide the u64 key space evenly when calculating the partition width
let max_inexact = 10;
assert!(!should_cause_overflow(max_inexact));
for max in &[max_exact, max_inexact] {
let range = Bank::pubkey_range_from_partition((max - 1, max - 1, *max));
assert_eq!(
range,
Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
}
}
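// BTreeMap only validates range bounds once it is non-empty, so insert one
// entry to make `range()` panic on a backwards range.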
fn map_to_test_bad_range() -> std::collections::BTreeMap<Pubkey, i8> {
let mut map = std::collections::BTreeMap::new();
// when empty, std::collections::BTreeMap doesn't sanitize the given range...
map.insert(solana_sdk::pubkey::new_rand(), 1);
map
}
#[test]
#[should_panic(expected = "range start is greater than range end in BTreeMap")]
fn test_rent_eager_bad_range() {
let test_map = map_to_test_bad_range();
let _ = test_map.range(
Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
])
..=Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]),
);
}
#[test]
fn test_rent_eager_pubkey_range_noop_range() {
let test_map = map_to_test_bad_range();
let range = Bank::pubkey_range_from_partition((0, 0, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
let range = Bank::pubkey_range_from_partition((1, 1, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
])
);
let _ = test_map.range(range);
let range = Bank::pubkey_range_from_partition((2, 2, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
}
#[test]
fn test_rent_eager_pubkey_range_dividable() {
let test_map = map_to_test_bad_range();
let range = Bank::pubkey_range_from_partition((0, 0, 2));
assert_eq!(
range,
Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
let range = Bank::pubkey_range_from_partition((0, 1, 2));
assert_eq!(
range,
Pubkey::new_from_array([
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
}
#[test]
fn test_rent_eager_pubkey_range_not_dividable() {
solana_logger::setup();
let test_map = map_to_test_bad_range();
let range = Bank::pubkey_range_from_partition((0, 0, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
let range = Bank::pubkey_range_from_partition((0, 1, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
let range = Bank::pubkey_range_from_partition((1, 2, 3));
assert_eq!(
range,
Pubkey::new_from_array([
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
}
#[test]
fn test_rent_eager_pubkey_range_gap() {
solana_logger::setup();
let test_map = map_to_test_bad_range();
let range = Bank::pubkey_range_from_partition((120, 1023, 12345));
assert_eq!(
range,
Pubkey::new_from_array([
0x02, 0x82, 0x5a, 0x89, 0xd1, 0xac, 0x58, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
])
..=Pubkey::new_from_array([
0x15, 0x3c, 0x1d, 0xf1, 0xc6, 0x39, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
])
);
let _ = test_map.range(range);
}
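// Test-only helpers: list the slots in which a pubkey's account was stored,
// and find the first slot of the next epoch.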
impl Bank {
fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
let (locked_entry, _) = self
.rc
.accounts
.accounts_db
.accounts_index
.get(pubkey, Some(ancestors), None)
.unwrap();
locked_entry
.slot_list()
.iter()
.map(|(slot, _)| *slot)
.collect::<Vec<Slot>>()
}
fn first_slot_in_next_epoch(&self) -> Slot {
self.epoch_schedule()
.get_first_slot_in_epoch(self.epoch() + 1)
}
}
#[test]
fn test_rent_eager_collect_rent_in_partition() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
activate_all_features(&mut genesis_config);
let zero_lamport_pubkey = solana_sdk::pubkey::new_rand();
let rent_due_pubkey = solana_sdk::pubkey::new_rand();
let rent_exempt_pubkey = solana_sdk::pubkey::new_rand();
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
let zero_lamports = 0;
let little_lamports = 1234;
let large_lamports = 123_456_789;
let rent_collected = 22;
bank.store_account(
&zero_lamport_pubkey,
&AccountSharedData::new(zero_lamports, 0, &Pubkey::default()),
);
bank.store_account(
&rent_due_pubkey,
&AccountSharedData::new(little_lamports, 0, &Pubkey::default()),
);
bank.store_account(
&rent_exempt_pubkey,
&AccountSharedData::new(large_lamports, 0, &Pubkey::default()),
);
let genesis_slot = 0;
let some_slot = 1000;
let ancestors = vec![(some_slot, 0), (0, 1)].into_iter().collect();
bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), some_slot));
assert_eq!(bank.collected_rent.load(Relaxed), 0);
assert_eq!(
bank.get_account(&rent_due_pubkey).unwrap().lamports(),
little_lamports
);
assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), 0);
assert_eq!(
bank.slots_by_pubkey(&rent_due_pubkey, &ancestors),
vec![genesis_slot]
);
assert_eq!(
bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors),
vec![genesis_slot]
);
assert_eq!(
bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors),
vec![genesis_slot]
);
bank.collect_rent_in_partition((0, 0, 1)); // all range
// unrelated 1-lamport accounts exist, accounting for the extra 2 lamports collected
assert_eq!(bank.collected_rent.load(Relaxed), rent_collected + 2);
assert_eq!(
bank.get_account(&rent_due_pubkey).unwrap().lamports(),
little_lamports - rent_collected
);
assert_eq!(bank.get_account(&rent_due_pubkey).unwrap().rent_epoch(), 6);
assert_eq!(
bank.get_account(&rent_exempt_pubkey).unwrap().lamports(),
large_lamports
);
assert_eq!(
bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(),
5
);
assert_eq!(
bank.slots_by_pubkey(&rent_due_pubkey, &ancestors),
vec![genesis_slot, some_slot]
);
assert_eq!(
bank.slots_by_pubkey(&rent_exempt_pubkey, &ancestors),
vec![genesis_slot, some_slot]
);
assert_eq!(
bank.slots_by_pubkey(&zero_lamport_pubkey, &ancestors),
vec![genesis_slot]
);
}
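// The bank hash must be deterministic regardless of whether a purged
// zero-lamport account ever existed: a fork that stored-then-purged it and
// a fork that never saw it should hash identically.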
#[test]
fn test_rent_eager_collect_rent_zero_lamport_deterministic() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(1);
let zero_lamport_pubkey = solana_sdk::pubkey::new_rand();
let genesis_bank1 = Arc::new(Bank::new_for_tests(&genesis_config));
let genesis_bank2 = Arc::new(Bank::new_for_tests(&genesis_config));
let bank1_with_zero = Arc::new(new_from_parent(&genesis_bank1));
let bank1_without_zero = Arc::new(new_from_parent(&genesis_bank2));
let zero_lamports = 0;
let account = AccountSharedData::new(zero_lamports, 0, &Pubkey::default());
bank1_with_zero.store_account(&zero_lamport_pubkey, &account);
bank1_without_zero.store_account(&zero_lamport_pubkey, &account);
bank1_without_zero
.rc
.accounts
.accounts_db
.accounts_index
.add_root(genesis_bank1.slot() + 1, false);
bank1_without_zero
.rc
.accounts
.accounts_db
.accounts_index
.purge_roots(&zero_lamport_pubkey);
let some_slot = 1000;
let bank2_with_zero = Arc::new(Bank::new_from_parent(
&bank1_with_zero,
&Pubkey::default(),
some_slot,
));
let bank2_without_zero = Arc::new(Bank::new_from_parent(
&bank1_without_zero,
&Pubkey::default(),
some_slot,
));
let hash1_with_zero = bank1_with_zero.hash();
let hash1_without_zero = bank1_without_zero.hash();
assert_eq!(hash1_with_zero, hash1_without_zero);
assert_ne!(hash1_with_zero, Hash::default());
bank2_with_zero.collect_rent_in_partition((0, 0, 1)); // all
bank2_without_zero.collect_rent_in_partition((0, 0, 1)); // all
bank2_with_zero.freeze();
let hash2_with_zero = bank2_with_zero.hash();
bank2_without_zero.freeze();
let hash2_without_zero = bank2_without_zero.hash();
assert_eq!(hash2_with_zero, hash2_without_zero);
assert_ne!(hash2_with_zero, Hash::default());
}
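// End-to-end staking rewards: accrue vote credits over an epoch-long slot,
// cross the epoch boundary (which triggers update_rewards()), then check
// that the minted inflation matches the staker's share of validator points
// and shows up in the bank's rewards vector.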
#[test]
fn test_bank_update_vote_stake_rewards() {
solana_logger::setup();
// create a bank that ticks really slowly...
let bank0 = Arc::new(Bank::new_for_tests(&GenesisConfig {
accounts: (0..42)
.map(|_| {
(
solana_sdk::pubkey::new_rand(),
Account::new(1_000_000_000, 0, &Pubkey::default()),
)
})
.collect(),
// set it up so the first epoch is a full year long
poh_config: PohConfig {
target_tick_duration: Duration::from_secs(
SECONDS_PER_YEAR as u64
/ MINIMUM_SLOTS_PER_EPOCH as u64
/ DEFAULT_TICKS_PER_SLOT,
),
hashes_per_tick: None,
target_tick_count: None,
},
cluster_type: ClusterType::MainnetBeta,
..GenesisConfig::default()
}));
// enable lazy rent collection because this test depends on rent-due accounts
// not being eagerly-collected for exact rewards calculation
bank0.restore_old_behavior_for_fragile_tests();
let sysvar_and_builtin_program_delta0 = 11;
assert_eq!(
bank0.capitalization(),
42 * 1_000_000_000 + sysvar_and_builtin_program_delta0
);
assert!(bank0.rewards.read().unwrap().is_empty());
let ((vote_id, mut vote_account), (stake_id, stake_account)) =
crate::stakes::tests::create_staked_node_accounts(1_0000);
// set up accounts
bank0.store_account_and_update_capitalization(&stake_id, &stake_account);
// generate some rewards
let mut vote_state = Some(VoteState::from(&vote_account).unwrap());
for i in 0..MAX_LOCKOUT_HISTORY + 42 {
if let Some(v) = vote_state.as_mut() {
v.process_slot_vote_unchecked(i as u64)
}
let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap()));
VoteState::to(&versioned, &mut vote_account).unwrap();
bank0.store_account_and_update_capitalization(&vote_id, &vote_account);
match versioned {
VoteStateVersions::Current(v) => {
vote_state = Some(*v);
}
_ => panic!("Has to be of type Current"),
};
}
bank0.store_account_and_update_capitalization(&vote_id, &vote_account);
let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let validator_points: u128 = bank0
.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer())
.vote_with_stake_delegations_map
.into_iter()
.map(
|(
_vote_pubkey,
VoteWithStakeDelegations {
vote_state,
delegations,
..
},
)| {
delegations
.iter()
.map(move |(_stake_pubkey, (stake_state, _stake_account))| {
stake_state::calculate_points(stake_state, &vote_state, None)
.unwrap_or(0)
})
.sum::<u128>()
},
)
.sum();
// put a child bank in epoch 1, which calls update_rewards()...
let bank1 = Bank::new_from_parent(
&bank0,
&Pubkey::default(),
bank0.get_slots_in_epoch(bank0.epoch()) + 1,
);
// verify that there's inflation
assert_ne!(bank1.capitalization(), bank0.capitalization());
// verify the inflation is represented in validator_points * validator_point_value
let sysvar_and_builtin_program_delta1 = 2;
let paid_rewards =
bank1.capitalization() - bank0.capitalization() - sysvar_and_builtin_program_delta1;
let rewards = bank1
.get_account(&sysvar::rewards::id())
.map(|account| from_account::<Rewards, _>(&account).unwrap())
.unwrap();
// verify the stake and vote accounts are the right size
assert!(
((bank1.get_balance(&stake_id) - stake_account.lamports() + bank1.get_balance(&vote_id)
- vote_account.lamports()) as f64
- rewards.validator_point_value * validator_points as f64)
.abs()
< 1.0
);
// verify the rewards are the right size
let allocated_rewards = rewards.validator_point_value * validator_points as f64;
assert!((allocated_rewards - paid_rewards as f64).abs() < 1.0); // rounding, truncating
// verify validator rewards show up in bank1.rewards vector
assert_eq!(
*bank1.rewards.read().unwrap(),
vec![(
stake_id,
RewardInfo {
reward_type: RewardType::Staking,
lamports: (rewards.validator_point_value * validator_points as f64) as i64,
post_balance: bank1.get_balance(&stake_id),
commission: Some(0),
}
)]
);
bank1.freeze();
assert!(bank1.calculate_and_verify_capitalization(true));
}
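// Helper for test_bank_update_rewards_determinism below: runs one full
// rewards cycle and returns the resulting capitalization so repeated runs
// can be compared for bit-for-bit equality.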
fn do_test_bank_update_rewards_determinism() -> u64 {
// create a bank that ticks really slowly...
let bank = Arc::new(Bank::new_for_tests(&GenesisConfig {
accounts: (0..42)
.map(|_| {
(
solana_sdk::pubkey::new_rand(),
Account::new(1_000_000_000, 0, &Pubkey::default()),
)
})
.collect(),
// set it up so the first epoch is a full year long
poh_config: PohConfig {
target_tick_duration: Duration::from_secs(
SECONDS_PER_YEAR as u64
/ MINIMUM_SLOTS_PER_EPOCH as u64
/ DEFAULT_TICKS_PER_SLOT,
),
hashes_per_tick: None,
target_tick_count: None,
},
cluster_type: ClusterType::MainnetBeta,
..GenesisConfig::default()
}));
// enable lazy rent collection because this test depends on rent-due accounts
// not being eagerly-collected for exact rewards calculation
bank.restore_old_behavior_for_fragile_tests();
let sysvar_and_builtin_program_delta = 11;
assert_eq!(
bank.capitalization(),
42 * 1_000_000_000 + sysvar_and_builtin_program_delta
);
assert!(bank.rewards.read().unwrap().is_empty());
let vote_id = solana_sdk::pubkey::new_rand();
let mut vote_account =
vote_state::create_account(&vote_id, &solana_sdk::pubkey::new_rand(), 50, 100);
let (stake_id1, stake_account1) = crate::stakes::tests::create_stake_account(123, &vote_id);
let (stake_id2, stake_account2) = crate::stakes::tests::create_stake_account(456, &vote_id);
// set up accounts
bank.store_account_and_update_capitalization(&stake_id1, &stake_account1);
bank.store_account_and_update_capitalization(&stake_id2, &stake_account2);
// generate some rewards
let mut vote_state = Some(VoteState::from(&vote_account).unwrap());
for i in 0..MAX_LOCKOUT_HISTORY + 42 {
if let Some(v) = vote_state.as_mut() {
v.process_slot_vote_unchecked(i as u64)
}
let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap()));
VoteState::to(&versioned, &mut vote_account).unwrap();
bank.store_account_and_update_capitalization(&vote_id, &vote_account);
match versioned {
VoteStateVersions::Current(v) => {
vote_state = Some(*v);
}
_ => panic!("Has to be of type Current"),
};
}
bank.store_account_and_update_capitalization(&vote_id, &vote_account);
// put a child bank in epoch 1, which calls update_rewards()...
let bank1 = Bank::new_from_parent(
&bank,
&Pubkey::default(),
bank.get_slots_in_epoch(bank.epoch()) + 1,
);
// verify that there's inflation
assert_ne!(bank1.capitalization(), bank.capitalization());
bank1.freeze();
assert!(bank1.calculate_and_verify_capitalization(true));
// verify voting and staking rewards are recorded
let rewards = bank1.rewards.read().unwrap();
rewards
.iter()
.find(|(_address, reward)| reward.reward_type == RewardType::Voting)
.unwrap();
rewards
.iter()
.find(|(_address, reward)| reward.reward_type == RewardType::Staking)
.unwrap();
bank1.capitalization()
}
#[test]
fn test_bank_update_rewards_determinism() {
solana_logger::setup();
// The same reward should be distributed given same credits
let expected_capitalization = do_test_bank_update_rewards_determinism();
// Repeat somewhat large number of iterations to expose possible different behavior
// depending on the randomly-seeded HashMap ordering
for _ in 0..30 {
let actual_capitalization = do_test_bank_update_rewards_determinism();
assert_eq!(actual_capitalization, expected_capitalization);
}
}
// Test that purging 0 lamports accounts works.
#[test]
fn test_purge_empty_accounts() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500_000);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let mut bank = parent;
for _ in 0..10 {
let blockhash = bank.last_blockhash();
let pubkey = solana_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &pubkey, 0, blockhash);
bank.process_transaction(&tx).unwrap();
bank.freeze();
bank.squash();
bank = Arc::new(new_from_parent(&bank));
}
bank.freeze();
bank.squash();
bank.force_flush_accounts_cache();
let hash = bank.update_accounts_hash();
bank.clean_accounts(false, false, None);
assert_eq!(bank.update_accounts_hash(), hash);
let bank0 = Arc::new(new_from_parent(&bank));
let blockhash = bank.last_blockhash();
let keypair = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 10, blockhash);
bank0.process_transaction(&tx).unwrap();
let bank1 = Arc::new(new_from_parent(&bank0));
let pubkey = solana_sdk::pubkey::new_rand();
let blockhash = bank.last_blockhash();
let tx = system_transaction::transfer(&keypair, &pubkey, 10, blockhash);
bank1.process_transaction(&tx).unwrap();
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports(), 10);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
info!("bank0 purge");
let hash = bank0.update_accounts_hash();
bank0.clean_accounts(false, false, None);
assert_eq!(bank0.update_accounts_hash(), hash);
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports(), 10);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
info!("bank1 purge");
bank1.clean_accounts(false, false, None);
assert_eq!(bank0.get_account(&keypair.pubkey()).unwrap().lamports(), 10);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
assert!(bank0.verify_bank_hash(true));
// Squash and then verify hash_internal value
bank0.freeze();
bank0.squash();
assert!(bank0.verify_bank_hash(true));
bank1.freeze();
bank1.squash();
bank1.update_accounts_hash();
assert!(bank1.verify_bank_hash(true));
// keypair should have 0 tokens on both forks
assert_eq!(bank0.get_account(&keypair.pubkey()), None);
assert_eq!(bank1.get_account(&keypair.pubkey()), None);
bank1.force_flush_accounts_cache();
bank1.clean_accounts(false, false, None);
assert!(bank1.verify_bank_hash(true));
}
#[test]
fn test_two_payments_to_one_party() {
let (genesis_config, mint_keypair) = create_genesis_config(10_000);
let pubkey = solana_sdk::pubkey::new_rand();
let bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.last_blockhash(), genesis_config.hash());
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.get_balance(&pubkey), 1_000);
bank.transfer(500, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.get_balance(&pubkey), 1_500);
assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_one_source_two_tx_one_batch() {
let (genesis_config, mint_keypair) = create_genesis_config(1);
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.last_blockhash(), genesis_config.hash());
let t1 = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash());
let t2 = system_transaction::transfer(&mint_keypair, &key2, 1, genesis_config.hash());
let txs = vec![t1.clone(), t2.clone()];
let res = bank.process_transactions(txs.iter());
assert_eq!(res.len(), 2);
assert_eq!(res[0], Ok(()));
assert_eq!(res[1], Err(TransactionError::AccountInUse));
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 0);
assert_eq!(bank.get_balance(&key1), 1);
assert_eq!(bank.get_balance(&key2), 0);
assert_eq!(bank.get_signature_status(&t1.signatures[0]), Some(Ok(())));
// TODO: Transactions that fail to pay a fee could be dropped silently.
// Non-instruction errors don't get logged in the signature cache
assert_eq!(bank.get_signature_status(&t2.signatures[0]), None);
}
#[test]
fn test_one_tx_two_out_atomic_fail() {
let (genesis_config, mint_keypair) = create_genesis_config(1);
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let bank = Bank::new_for_tests(&genesis_config);
let instructions =
system_instruction::transfer_many(&mint_keypair.pubkey(), &[(key1, 1), (key2, 1)]);
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash());
assert_eq!(
bank.process_transaction(&tx).unwrap_err(),
TransactionError::InstructionError(1, SystemError::ResultWithNegativeLamports.into())
);
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 1);
assert_eq!(bank.get_balance(&key1), 0);
assert_eq!(bank.get_balance(&key2), 0);
}
#[test]
fn test_one_tx_two_out_atomic_pass() {
let (genesis_config, mint_keypair) = create_genesis_config(2);
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let bank = Bank::new_for_tests(&genesis_config);
let instructions =
system_instruction::transfer_many(&mint_keypair.pubkey(), &[(key1, 1), (key2, 1)]);
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash());
bank.process_transaction(&tx).unwrap();
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 0);
assert_eq!(bank.get_balance(&key1), 1);
assert_eq!(bank.get_balance(&key2), 1);
}
// This test demonstrates that fees are paid even when a program fails.
#[test]
fn test_detect_failed_duplicate_transactions() {
let (mut genesis_config, mint_keypair) = create_genesis_config(2);
genesis_config.fee_rate_governor = FeeRateGovernor::new(1, 0);
let bank = Bank::new_for_tests(&genesis_config);
let dest = Keypair::new();
// the source cannot cover both the fee and the full transfer amount
let tx =
system_transaction::transfer(&mint_keypair, &dest.pubkey(), 2, genesis_config.hash());
let signature = tx.signatures[0];
assert!(!bank.has_signature(&signature));
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InstructionError(
0,
SystemError::ResultWithNegativeLamports.into(),
))
);
// The lamports didn't move, but the from address paid the transaction fee.
assert_eq!(bank.get_balance(&dest.pubkey()), 0);
// This should be the original balance minus the transaction fee.
assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 1);
}
#[test]
fn test_account_not_found() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(0);
let bank = Bank::new_for_tests(&genesis_config);
let keypair = Keypair::new();
assert_eq!(
bank.transfer(1, &keypair, &mint_keypair.pubkey()),
Err(TransactionError::AccountNotFound)
);
assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_insufficient_funds() {
let (genesis_config, mint_keypair) = create_genesis_config(11_000);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = solana_sdk::pubkey::new_rand();
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.transaction_count(), 1);
assert_eq!(bank.get_balance(&pubkey), 1_000);
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
Err(TransactionError::InstructionError(
0,
SystemError::ResultWithNegativeLamports.into(),
))
);
assert_eq!(bank.transaction_count(), 1);
let mint_pubkey = mint_keypair.pubkey();
assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
assert_eq!(bank.get_balance(&pubkey), 1_000);
}
#[test]
fn test_transfer_to_newb() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let pubkey = solana_sdk::pubkey::new_rand();
bank.transfer(500, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank.get_balance(&pubkey), 500);
}
#[test]
fn test_transfer_to_sysvar() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(10_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let normal_pubkey = solana_sdk::pubkey::new_rand();
let sysvar_pubkey = sysvar::clock::id();
assert_eq!(bank.get_balance(&normal_pubkey), 0);
assert_eq!(bank.get_balance(&sysvar_pubkey), 1);
bank.transfer(500, &mint_keypair, &normal_pubkey).unwrap();
bank.transfer(500, &mint_keypair, &sysvar_pubkey)
.unwrap_err();
assert_eq!(bank.get_balance(&normal_pubkey), 500);
assert_eq!(bank.get_balance(&sysvar_pubkey), 1);
let bank = Arc::new(new_from_parent(&bank));
assert_eq!(bank.get_balance(&normal_pubkey), 500);
assert_eq!(bank.get_balance(&sysvar_pubkey), 1);
}
#[test]
fn test_bank_deposit() {
let (genesis_config, _mint_keypair) = create_genesis_config(100);
let bank = Bank::new_for_tests(&genesis_config);
// Test new account
let key = Keypair::new();
let new_balance = bank.deposit(&key.pubkey(), 10).unwrap();
assert_eq!(new_balance, 10);
assert_eq!(bank.get_balance(&key.pubkey()), 10);
// Existing account
let new_balance = bank.deposit(&key.pubkey(), 3).unwrap();
assert_eq!(new_balance, 13);
assert_eq!(bank.get_balance(&key.pubkey()), 13);
}
#[test]
fn test_bank_withdraw() {
let (genesis_config, _mint_keypair) = create_genesis_config(100);
let bank = Bank::new_for_tests(&genesis_config);
// Test no account
let key = Keypair::new();
assert_eq!(
bank.withdraw(&key.pubkey(), 10),
Err(TransactionError::AccountNotFound)
);
bank.deposit(&key.pubkey(), 3).unwrap();
assert_eq!(bank.get_balance(&key.pubkey()), 3);
// Low balance
assert_eq!(
bank.withdraw(&key.pubkey(), 10),
Err(TransactionError::InsufficientFundsForFee)
);
// Enough balance
assert_eq!(bank.withdraw(&key.pubkey(), 2), Ok(()));
assert_eq!(bank.get_balance(&key.pubkey()), 1);
}
#[test]
fn test_bank_withdraw_from_nonce_account() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(100_000);
genesis_config.rent.lamports_per_byte_year = 42;
let bank = Bank::new_for_tests(&genesis_config);
let min_balance = bank.get_minimum_balance_for_rent_exemption(nonce::State::size());
let nonce = Keypair::new();
let nonce_account = AccountSharedData::new_data(
min_balance + 42,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::default(),
)),
&system_program::id(),
)
.unwrap();
bank.store_account(&nonce.pubkey(), &nonce_account);
assert_eq!(bank.get_balance(&nonce.pubkey()), min_balance + 42);
// A withdrawal leaving a non-zero but sub-min_balance balance fails
assert_eq!(
bank.withdraw(&nonce.pubkey(), min_balance / 2),
Err(TransactionError::InsufficientFundsForFee)
);
assert_eq!(bank.get_balance(&nonce.pubkey()), min_balance + 42);
// A withdrawal leaving exactly the rent-exempt minimum succeeds
bank.withdraw(&nonce.pubkey(), 42).unwrap();
assert_eq!(bank.get_balance(&nonce.pubkey()), min_balance);
// Account closure fails
assert_eq!(
bank.withdraw(&nonce.pubkey(), min_balance),
Err(TransactionError::InsufficientFundsForFee),
);
}
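// End-to-end fee handling: the payer is debited the full fee up front, the
// leader collects its share when the slot ends, and the burned remainder
// reduces capitalization. With FeeRateGovernor::new(4, 0) and the
// governor's default burn split (50% at the time of writing),
// burn(4) yields (collected, burned) == (2, 2).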
#[test]
fn test_bank_tx_fee() {
solana_logger::setup();
let arbitrary_transfer_amount = 42;
let mint = arbitrary_transfer_amount * 100;
let leader = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(mint, &leader, 3);
genesis_config.fee_rate_governor = FeeRateGovernor::new(4, 0); // something divisible by 2
let expected_fee_paid = genesis_config
.fee_rate_governor
.create_fee_calculator()
.lamports_per_signature;
let (expected_fee_collected, expected_fee_burned) =
genesis_config.fee_rate_governor.burn(expected_fee_paid);
let mut bank = Bank::new_for_tests(&genesis_config);
let capitalization = bank.capitalization();
let key = Keypair::new();
let tx = system_transaction::transfer(
&mint_keypair,
&key.pubkey(),
arbitrary_transfer_amount,
bank.last_blockhash(),
);
let initial_balance = bank.get_balance(&leader);
assert_eq!(bank.process_transaction(&tx), Ok(()));
assert_eq!(bank.get_balance(&key.pubkey()), arbitrary_transfer_amount);
assert_eq!(
bank.get_balance(&mint_keypair.pubkey()),
mint - arbitrary_transfer_amount - expected_fee_paid
);
assert_eq!(bank.get_balance(&leader), initial_balance);
goto_end_of_slot(&mut bank);
assert_eq!(bank.signature_count(), 1);
assert_eq!(
bank.get_balance(&leader),
initial_balance + expected_fee_collected
); // Leader collects fee after the bank is frozen
// verify capitalization
let sysvar_and_builtin_program_delta = 1;
assert_eq!(
capitalization - expected_fee_burned + sysvar_and_builtin_program_delta,
bank.capitalization()
);
assert_eq!(
*bank.rewards.read().unwrap(),
vec![(
leader,
RewardInfo {
reward_type: RewardType::Fee,
lamports: expected_fee_collected as i64,
post_balance: initial_balance + expected_fee_collected,
commission: None,
}
)]
);
// Verify that an InstructionError collects fees, too
let mut bank = Bank::new_from_parent(&Arc::new(bank), &leader, 1);
let mut tx =
system_transaction::transfer(&mint_keypair, &key.pubkey(), 1, bank.last_blockhash());
// Create a bogus instruction to system_program to cause an instruction error
tx.message.instructions[0].data[0] = 40;
bank.process_transaction(&tx)
.expect_err("instruction error");
assert_eq!(bank.get_balance(&key.pubkey()), arbitrary_transfer_amount); // no change
assert_eq!(
bank.get_balance(&mint_keypair.pubkey()),
mint - arbitrary_transfer_amount - 2 * expected_fee_paid
); // mint_keypair still pays a fee
goto_end_of_slot(&mut bank);
assert_eq!(bank.signature_count(), 1);
// Profit! 2 transaction signatures processed at 4 lamports each, the leader keeping the unburned half
assert_eq!(
bank.get_balance(&leader),
initial_balance + 2 * expected_fee_collected
);
assert_eq!(
*bank.rewards.read().unwrap(),
vec![(
leader,
RewardInfo {
reward_type: RewardType::Fee,
lamports: expected_fee_collected as i64,
post_balance: initial_balance + 2 * expected_fee_collected,
commission: None,
}
)]
);
}
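// Fees are charged at the rate tied to the transaction's recent blockhash,
// not the rate of the bank that happens to process it: the same transfer
// costs 0 with cheap_blockhash and more with expensive_blockhash.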
#[test]
fn test_bank_blockhash_fee_schedule() {
//solana_logger::setup();
let leader = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(1_000_000, &leader, 3);
genesis_config
.fee_rate_governor
.target_lamports_per_signature = 1000;
genesis_config.fee_rate_governor.target_signatures_per_slot = 1;
let mut bank = Bank::new_for_tests(&genesis_config);
goto_end_of_slot(&mut bank);
let cheap_blockhash = bank.last_blockhash();
let cheap_lamports_per_signature = bank.get_lamports_per_signature();
assert_eq!(cheap_lamports_per_signature, 0);
let mut bank = Bank::new_from_parent(&Arc::new(bank), &leader, 1);
goto_end_of_slot(&mut bank);
let expensive_blockhash = bank.last_blockhash();
let expensive_lamports_per_signature = bank.get_lamports_per_signature();
assert!(cheap_lamports_per_signature < expensive_lamports_per_signature);
let bank = Bank::new_from_parent(&Arc::new(bank), &leader, 2);
// Send a transfer using cheap_blockhash
let key = Keypair::new();
let initial_mint_balance = bank.get_balance(&mint_keypair.pubkey());
let tx = system_transaction::transfer(&mint_keypair, &key.pubkey(), 1, cheap_blockhash);
assert_eq!(bank.process_transaction(&tx), Ok(()));
assert_eq!(bank.get_balance(&key.pubkey()), 1);
assert_eq!(
bank.get_balance(&mint_keypair.pubkey()),
initial_mint_balance - 1 - cheap_lamports_per_signature
);
// Send a transfer using expensive_blockhash
let key = Keypair::new();
let initial_mint_balance = bank.get_balance(&mint_keypair.pubkey());
let tx = system_transaction::transfer(&mint_keypair, &key.pubkey(), 1, expensive_blockhash);
assert_eq!(bank.process_transaction(&tx), Ok(()));
assert_eq!(bank.get_balance(&key.pubkey()), 1);
assert_eq!(
bank.get_balance(&mint_keypair.pubkey()),
initial_mint_balance - 1 - expensive_lamports_per_signature
);
}
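// Transactions that fail with an InstructionError still pay their fee;
// filter_program_errors_and_collect_fee then reports them as Ok(()) so the
// fee collection sticks.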
#[test]
fn test_filter_program_errors_and_collect_fee() {
let leader = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(100, &leader, 3);
genesis_config.fee_rate_governor = FeeRateGovernor::new(2, 0);
let bank = Bank::new_for_tests(&genesis_config);
let key = Keypair::new();
let tx1 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
&mint_keypair,
&key.pubkey(),
2,
genesis_config.hash(),
));
let tx2 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
&mint_keypair,
&key.pubkey(),
5,
genesis_config.hash(),
));
let results = vec![
(Ok(()), None),
(
Err(TransactionError::InstructionError(
1,
SystemError::ResultWithNegativeLamports.into(),
)),
None,
),
];
let initial_balance = bank.get_balance(&leader);
let results = bank.filter_program_errors_and_collect_fee(&[tx1, tx2], &results);
bank.freeze();
assert_eq!(
bank.get_balance(&leader),
initial_balance
+ bank
.fee_rate_governor
.burn(bank.fee_rate_governor.lamports_per_signature * 2)
.0
);
assert_eq!(results[0], Ok(()));
assert_eq!(results[1], Ok(()));
}
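// Within a single batch, the second transfer cannot spend the lamports the
// first one credits (the accounts are still locked/unsettled), so it fails
// and is not counted in transaction_count.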
#[test]
fn test_debits_before_credits() {
let (genesis_config, mint_keypair) = create_genesis_config(2);
let bank = Bank::new_for_tests(&genesis_config);
let keypair = Keypair::new();
let tx0 = system_transaction::transfer(
&mint_keypair,
&keypair.pubkey(),
2,
genesis_config.hash(),
);
let tx1 = system_transaction::transfer(
&keypair,
&mint_keypair.pubkey(),
1,
genesis_config.hash(),
);
let txs = vec![tx0, tx1];
let results = bank.process_transactions(txs.iter());
assert!(results[1].is_err());
// Assert bad transactions aren't counted.
assert_eq!(bank.transaction_count(), 1);
}
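// Read-only account handling: many transactions may take a read lock on
// the same account concurrently, but a read lock and a write lock on the
// same account within one batch conflict (AccountInUse).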
#[test]
fn test_readonly_accounts() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0);
let bank = Bank::new_for_tests(&genesis_config);
let vote_pubkey0 = solana_sdk::pubkey::new_rand();
let vote_pubkey1 = solana_sdk::pubkey::new_rand();
let vote_pubkey2 = solana_sdk::pubkey::new_rand();
let authorized_voter = Keypair::new();
let payer0 = Keypair::new();
let payer1 = Keypair::new();
// Create vote accounts
let vote_account0 =
vote_state::create_account(&vote_pubkey0, &authorized_voter.pubkey(), 0, 100);
let vote_account1 =
vote_state::create_account(&vote_pubkey1, &authorized_voter.pubkey(), 0, 100);
let vote_account2 =
vote_state::create_account(&vote_pubkey2, &authorized_voter.pubkey(), 0, 100);
bank.store_account(&vote_pubkey0, &vote_account0);
bank.store_account(&vote_pubkey1, &vote_account1);
bank.store_account(&vote_pubkey2, &vote_account2);
// Fund payers
bank.transfer(10, &mint_keypair, &payer0.pubkey()).unwrap();
bank.transfer(10, &mint_keypair, &payer1.pubkey()).unwrap();
bank.transfer(1, &mint_keypair, &authorized_voter.pubkey())
.unwrap();
let vote = Vote::new(vec![1], Hash::default());
let ix0 = vote_instruction::vote(&vote_pubkey0, &authorized_voter.pubkey(), vote.clone());
let tx0 = Transaction::new_signed_with_payer(
&[ix0],
Some(&payer0.pubkey()),
&[&payer0, &authorized_voter],
bank.last_blockhash(),
);
let ix1 = vote_instruction::vote(&vote_pubkey1, &authorized_voter.pubkey(), vote.clone());
let tx1 = Transaction::new_signed_with_payer(
&[ix1],
Some(&payer1.pubkey()),
&[&payer1, &authorized_voter],
bank.last_blockhash(),
);
let txs = vec![tx0, tx1];
let results = bank.process_transactions(txs.iter());
// If multiple transactions attempt to read the same account, they should succeed.
// Vote authorized_voter and sysvar accounts are given read-only handling
assert_eq!(results[0], Ok(()));
assert_eq!(results[1], Ok(()));
let ix0 = vote_instruction::vote(&vote_pubkey2, &authorized_voter.pubkey(), vote);
let tx0 = Transaction::new_signed_with_payer(
&[ix0],
Some(&payer0.pubkey()),
&[&payer0, &authorized_voter],
bank.last_blockhash(),
);
let tx1 = system_transaction::transfer(
&authorized_voter,
&solana_sdk::pubkey::new_rand(),
1,
bank.last_blockhash(),
);
let txs = vec![tx0, tx1];
let results = bank.process_transactions(txs.iter());
// However, an account may not be locked as read-only and writable at the same time.
assert_eq!(results[0], Ok(()));
assert_eq!(results[1], Err(TransactionError::AccountInUse));
}
#[test]
fn test_interleaving_locks() {
let (genesis_config, mint_keypair) = create_genesis_config(3);
let bank = Bank::new_for_tests(&genesis_config);
let alice = Keypair::new();
let bob = Keypair::new();
let tx1 =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
let pay_alice = vec![tx1];
let lock_result = bank.prepare_batch_for_tests(pay_alice);
let results_alice = bank
.load_execute_and_commit_transactions(
&lock_result,
MAX_PROCESSING_AGE,
false,
false,
false,
&mut ExecuteTimings::default(),
)
.0
.fee_collection_results;
assert_eq!(results_alice[0], Ok(()));
// try executing an interleaved transfer twice
assert_eq!(
bank.transfer(1, &mint_keypair, &bob.pubkey()),
Err(TransactionError::AccountInUse)
);
// the second time should fail as well
// this verifies that `unlock_accounts` doesn't unlock `AccountInUse` accounts
assert_eq!(
bank.transfer(1, &mint_keypair, &bob.pubkey()),
Err(TransactionError::AccountInUse)
);
drop(lock_result);
assert!(bank.transfer(2, &mint_keypair, &bob.pubkey()).is_ok());
}
#[test]
fn test_readonly_relaxed_locks() {
let (genesis_config, _) = create_genesis_config(3);
let bank = Bank::new_for_tests(&genesis_config);
let key0 = Keypair::new();
let key1 = Keypair::new();
let key2 = Keypair::new();
let key3 = solana_sdk::pubkey::new_rand();
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
account_keys: vec![key0.pubkey(), key3],
recent_blockhash: Hash::default(),
instructions: vec![],
};
let tx = Transaction::new(&[&key0], message, genesis_config.hash());
let txs = vec![tx];
let batch0 = bank.prepare_batch_for_tests(txs);
assert!(batch0.lock_results()[0].is_ok());
// Try locking accounts, locking a previously read-only account as writable
// should fail
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 0,
},
account_keys: vec![key1.pubkey(), key3],
recent_blockhash: Hash::default(),
instructions: vec![],
};
let tx = Transaction::new(&[&key1], message, genesis_config.hash());
let txs = vec![tx];
let batch1 = bank.prepare_batch_for_tests(txs);
assert!(batch1.lock_results()[0].is_err());
// Try locking a previously read-only account a 2nd time; should succeed
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
account_keys: vec![key2.pubkey(), key3],
recent_blockhash: Hash::default(),
instructions: vec![],
};
let tx = Transaction::new(&[&key2], message, genesis_config.hash());
let txs = vec![tx];
let batch2 = bank.prepare_batch_for_tests(txs);
assert!(batch2.lock_results()[0].is_ok());
}
#[test]
fn test_bank_invalid_account_index() {
let (genesis_config, mint_keypair) = create_genesis_config(1);
let keypair = Keypair::new();
let bank = Bank::new_for_tests(&genesis_config);
let tx = system_transaction::transfer(
&mint_keypair,
&keypair.pubkey(),
1,
genesis_config.hash(),
);
let mut tx_invalid_program_index = tx.clone();
tx_invalid_program_index.message.instructions[0].program_id_index = 42;
assert_eq!(
bank.process_transaction(&tx_invalid_program_index),
Err(TransactionError::SanitizeFailure)
);
let mut tx_invalid_account_index = tx;
tx_invalid_account_index.message.instructions[0].accounts[0] = 42;
assert_eq!(
bank.process_transaction(&tx_invalid_account_index),
Err(TransactionError::SanitizeFailure)
);
}
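// A self-transfer is processed and recorded in the status cache but leaves
// the balance unchanged.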
#[test]
fn test_bank_pay_to_self() {
let (genesis_config, mint_keypair) = create_genesis_config(1);
let key1 = Keypair::new();
let bank = Bank::new_for_tests(&genesis_config);
bank.transfer(1, &mint_keypair, &key1.pubkey()).unwrap();
assert_eq!(bank.get_balance(&key1.pubkey()), 1);
let tx = system_transaction::transfer(&key1, &key1.pubkey(), 1, genesis_config.hash());
let _res = bank.process_transaction(&tx);
assert_eq!(bank.get_balance(&key1.pubkey()), 1);
bank.get_signature_status(&tx.signatures[0])
.unwrap()
.unwrap();
}
fn new_from_parent(parent: &Arc<Bank>) -> Bank {
Bank::new_from_parent(parent, &Pubkey::default(), parent.slot() + 1)
}
/// Verify that the parent's vector is computed correctly
#[test]
fn test_bank_parents() {
let (genesis_config, _) = create_genesis_config(1);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let bank = new_from_parent(&parent);
assert!(Arc::ptr_eq(&bank.parents()[0], &parent));
}
/// Verifies that transactions are dropped if they have already been processed
#[test]
fn test_tx_already_processed() {
let (genesis_config, mint_keypair) = create_genesis_config(2);
let bank = Bank::new_for_tests(&genesis_config);
let key1 = Keypair::new();
let mut tx =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
// First process `tx` so that the status cache is updated
assert_eq!(bank.process_transaction(&tx), Ok(()));
// Ensure that signature check works
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::AlreadyProcessed)
);
// Change transaction signature to simulate processing a transaction with a different signature
// for the same message.
tx.signatures[0] = Signature::default();
// Ensure that message hash check works
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::AlreadyProcessed)
);
}
/// Verifies that last ids and status cache are correctly referenced from parent
#[test]
fn test_bank_parent_already_processed() {
let (genesis_config, mint_keypair) = create_genesis_config(2);
let key1 = Keypair::new();
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let tx =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
assert_eq!(parent.process_transaction(&tx), Ok(()));
let bank = new_from_parent(&parent);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::AlreadyProcessed)
);
}
/// Verifies that last ids and accounts are correctly referenced from parent
#[test]
fn test_bank_parent_account_spend() {
let (genesis_config, mint_keypair) = create_genesis_config(2);
let key1 = Keypair::new();
let key2 = Keypair::new();
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let tx =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
assert_eq!(parent.process_transaction(&tx), Ok(()));
let bank = new_from_parent(&parent);
let tx = system_transaction::transfer(&key1, &key2.pubkey(), 1, genesis_config.hash());
assert_eq!(bank.process_transaction(&tx), Ok(()));
assert_eq!(parent.get_signature_status(&tx.signatures[0]), None);
}
#[test]
fn test_bank_hash_internal_state() {
let (genesis_config, mint_keypair) = create_genesis_config(2_000);
let bank0 = Bank::new_for_tests(&genesis_config);
let bank1 = Bank::new_for_tests(&genesis_config);
let initial_state = bank0.hash_internal_state();
assert_eq!(bank1.hash_internal_state(), initial_state);
let pubkey = solana_sdk::pubkey::new_rand();
bank0.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_ne!(bank0.hash_internal_state(), initial_state);
bank1.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
// Checkpointing should always result in a new state
let bank2 = new_from_parent(&Arc::new(bank1));
assert_ne!(bank0.hash_internal_state(), bank2.hash_internal_state());
let pubkey2 = solana_sdk::pubkey::new_rand();
info!("transfer 2 {}", pubkey2);
bank2.transfer(10, &mint_keypair, &pubkey2).unwrap();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash(true));
}
#[test]
fn test_bank_hash_internal_state_verify() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(2_000);
let bank0 = Bank::new_for_tests(&genesis_config);
let pubkey = solana_sdk::pubkey::new_rand();
info!("transfer 0 {} mint: {}", pubkey, mint_keypair.pubkey());
bank0.transfer(1_000, &mint_keypair, &pubkey).unwrap();
let bank0_state = bank0.hash_internal_state();
let bank0 = Arc::new(bank0);
// Checkpointing should result in a new state while freezing the parent
let bank2 = Bank::new_from_parent(&bank0, &solana_sdk::pubkey::new_rand(), 1);
assert_ne!(bank0_state, bank2.hash_internal_state());
// Checkpointing should modify the checkpoint's state when frozen
assert_ne!(bank0_state, bank0.hash_internal_state());
// Checkpointing should never modify the checkpoint's state once frozen
let bank0_state = bank0.hash_internal_state();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash(true));
let bank3 = Bank::new_from_parent(&bank0, &solana_sdk::pubkey::new_rand(), 2);
assert_eq!(bank0_state, bank0.hash_internal_state());
assert!(bank2.verify_bank_hash(true));
bank3.update_accounts_hash();
assert!(bank3.verify_bank_hash(true));
let pubkey2 = solana_sdk::pubkey::new_rand();
info!("transfer 2 {}", pubkey2);
bank2.transfer(10, &mint_keypair, &pubkey2).unwrap();
bank2.update_accounts_hash();
assert!(bank2.verify_bank_hash(true));
assert!(bank3.verify_bank_hash(true));
}
#[test]
#[should_panic(expected = "assertion failed: self.is_frozen()")]
fn test_verify_hash_unfrozen() {
let (genesis_config, _mint_keypair) = create_genesis_config(2_000);
let bank = Bank::new_for_tests(&genesis_config);
assert!(bank.verify_hash());
}
#[test]
fn test_verify_snapshot_bank() {
solana_logger::setup();
let pubkey = solana_sdk::pubkey::new_rand();
let (genesis_config, mint_keypair) = create_genesis_config(2_000);
let bank = Bank::new_for_tests(&genesis_config);
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
bank.freeze();
bank.update_accounts_hash();
assert!(bank.verify_snapshot_bank(true, false, None));
// tamper the bank after freeze!
bank.increment_signature_count(1);
assert!(!bank.verify_snapshot_bank(true, false, None));
}
// Test that two bank forks with the same accounts should not hash to the same value.
#[test]
fn test_bank_hash_internal_state_same_account_different_fork() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(2_000);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let initial_state = bank0.hash_internal_state();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
assert_ne!(bank1.hash_internal_state(), initial_state);
info!("transfer bank1");
let pubkey = solana_sdk::pubkey::new_rand();
bank1.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_ne!(bank1.hash_internal_state(), initial_state);
info!("transfer bank2");
// bank2 should not hash the same as bank1
let bank2 = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank2.transfer(1_000, &mint_keypair, &pubkey).unwrap();
assert_ne!(bank2.hash_internal_state(), initial_state);
assert_ne!(bank1.hash_internal_state(), bank2.hash_internal_state());
}
#[test]
fn test_hash_internal_state_genesis() {
let bank0 = Bank::new_for_tests(&create_genesis_config(10).0);
let bank1 = Bank::new_for_tests(&create_genesis_config(20).0);
assert_ne!(bank0.hash_internal_state(), bank1.hash_internal_state());
}
// See that the order of two transfers does not affect the result
// of hash_internal_state
#[test]
fn test_hash_internal_state_order() {
let (genesis_config, mint_keypair) = create_genesis_config(100);
let bank0 = Bank::new_for_tests(&genesis_config);
let bank1 = Bank::new_for_tests(&genesis_config);
assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
let key0 = solana_sdk::pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
bank0.transfer(10, &mint_keypair, &key0).unwrap();
bank0.transfer(20, &mint_keypair, &key1).unwrap();
bank1.transfer(20, &mint_keypair, &key1).unwrap();
bank1.transfer(10, &mint_keypair, &key0).unwrap();
assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
}
#[test]
fn test_hash_internal_state_error() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(100);
let bank = Bank::new_for_tests(&genesis_config);
let key0 = solana_sdk::pubkey::new_rand();
bank.transfer(10, &mint_keypair, &key0).unwrap();
let orig = bank.hash_internal_state();
// Transfer will error but still take a fee
assert!(bank.transfer(1000, &mint_keypair, &key0).is_err());
assert_ne!(orig, bank.hash_internal_state());
let orig = bank.hash_internal_state();
let empty_keypair = Keypair::new();
assert!(bank.transfer(1000, &empty_keypair, &key0).is_err());
assert_eq!(orig, bank.hash_internal_state());
}
#[test]
fn test_bank_hash_internal_state_squash() {
let collector_id = Pubkey::default();
let bank0 = Arc::new(Bank::new_for_tests(&create_genesis_config(10).0));
let hash0 = bank0.hash_internal_state();
// save hash0 because new_from_parent
// updates sysvar entries
let bank1 = Bank::new_from_parent(&bank0, &collector_id, 1);
// even with no delta in bank1, the hash should still update (sysvar entries change)
assert_ne!(hash0, bank1.hash_internal_state());
// remove parent
bank1.squash();
assert!(bank1.parents().is_empty());
}
/// Verifies that last ids and accounts are correctly referenced from parent
#[test]
fn test_bank_squash() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(2);
let key1 = Keypair::new();
let key2 = Keypair::new();
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let tx_transfer_mint_to_1 =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
trace!("parent process tx ");
assert_eq!(parent.process_transaction(&tx_transfer_mint_to_1), Ok(()));
trace!("done parent process tx ");
assert_eq!(parent.transaction_count(), 1);
assert_eq!(
parent.get_signature_status(&tx_transfer_mint_to_1.signatures[0]),
Some(Ok(()))
);
trace!("new from parent");
let bank = new_from_parent(&parent);
trace!("done new from parent");
assert_eq!(
bank.get_signature_status(&tx_transfer_mint_to_1.signatures[0]),
Some(Ok(()))
);
assert_eq!(bank.transaction_count(), parent.transaction_count());
let tx_transfer_1_to_2 =
system_transaction::transfer(&key1, &key2.pubkey(), 1, genesis_config.hash());
assert_eq!(bank.process_transaction(&tx_transfer_1_to_2), Ok(()));
assert_eq!(bank.transaction_count(), 2);
assert_eq!(parent.transaction_count(), 1);
assert_eq!(
parent.get_signature_status(&tx_transfer_1_to_2.signatures[0]),
None
);
for _ in 0..3 {
// first time these should match what happened above, assert that parents are ok
assert_eq!(bank.get_balance(&key1.pubkey()), 0);
assert_eq!(bank.get_account(&key1.pubkey()), None);
assert_eq!(bank.get_balance(&key2.pubkey()), 1);
trace!("start");
assert_eq!(
bank.get_signature_status(&tx_transfer_mint_to_1.signatures[0]),
Some(Ok(()))
);
assert_eq!(
bank.get_signature_status(&tx_transfer_1_to_2.signatures[0]),
Some(Ok(()))
);
// works iteration 0, no-ops on iteration 1 and 2
trace!("SQUASH");
bank.squash();
assert_eq!(parent.transaction_count(), 1);
assert_eq!(bank.transaction_count(), 2);
}
}
#[test]
fn test_bank_get_account_in_parent_after_squash() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = Keypair::new();
parent.transfer(1, &mint_keypair, &key1.pubkey()).unwrap();
assert_eq!(parent.get_balance(&key1.pubkey()), 1);
let bank = new_from_parent(&parent);
bank.squash();
assert_eq!(parent.get_balance(&key1.pubkey()), 1);
}
#[test]
fn test_bank_get_account_in_parent_after_squash2() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = Keypair::new();
bank0.transfer(1, &mint_keypair, &key1.pubkey()).unwrap();
assert_eq!(bank0.get_balance(&key1.pubkey()), 1);
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
bank1.transfer(3, &mint_keypair, &key1.pubkey()).unwrap();
let bank2 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 2));
bank2.transfer(2, &mint_keypair, &key1.pubkey()).unwrap();
let bank3 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 3));
bank1.squash();
// This picks up the values from bank 1, which is the highest root:
// TODO: if we need to access rooted banks older than this,
// need to fix the lookup.
assert_eq!(bank0.get_balance(&key1.pubkey()), 4);
assert_eq!(bank3.get_balance(&key1.pubkey()), 4);
assert_eq!(bank2.get_balance(&key1.pubkey()), 3);
bank3.squash();
assert_eq!(bank1.get_balance(&key1.pubkey()), 4);
let bank4 = Arc::new(Bank::new_from_parent(&bank3, &Pubkey::default(), 4));
bank4.transfer(4, &mint_keypair, &key1.pubkey()).unwrap();
assert_eq!(bank4.get_balance(&key1.pubkey()), 8);
assert_eq!(bank3.get_balance(&key1.pubkey()), 4);
bank4.squash();
let bank5 = Arc::new(Bank::new_from_parent(&bank4, &Pubkey::default(), 5));
bank5.squash();
let bank6 = Arc::new(Bank::new_from_parent(&bank5, &Pubkey::default(), 6));
bank6.squash();
// This picks up the values from bank 4, which is the highest root:
// TODO: if we need to access rooted banks older than this,
// need to fix the lookup.
assert_eq!(bank3.get_balance(&key1.pubkey()), 8);
assert_eq!(bank2.get_balance(&key1.pubkey()), 8);
assert_eq!(bank4.get_balance(&key1.pubkey()), 8);
}
#[test]
fn test_bank_get_account_modified_since_parent_with_fixed_root() {
let pubkey = solana_sdk::pubkey::new_rand();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let bank1 = Arc::new(Bank::new_for_tests(&genesis_config));
bank1.transfer(1, &mint_keypair, &pubkey).unwrap();
let result = bank1.get_account_modified_since_parent_with_fixed_root(&pubkey);
assert!(result.is_some());
let (account, slot) = result.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(slot, 0);
let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 1));
assert!(bank2
.get_account_modified_since_parent_with_fixed_root(&pubkey)
.is_none());
bank2.transfer(100, &mint_keypair, &pubkey).unwrap();
let result = bank1.get_account_modified_since_parent_with_fixed_root(&pubkey);
assert!(result.is_some());
let (account, slot) = result.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(slot, 0);
let result = bank2.get_account_modified_since_parent_with_fixed_root(&pubkey);
assert!(result.is_some());
let (account, slot) = result.unwrap();
assert_eq!(account.lamports(), 101);
assert_eq!(slot, 1);
bank1.squash();
let bank3 = Bank::new_from_parent(&bank2, &Pubkey::default(), 3);
assert_eq!(
None,
bank3.get_account_modified_since_parent_with_fixed_root(&pubkey)
);
}
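// update_sysvar_account semantics: creating a sysvar account adds its
// rent-exempt balance to capitalization exactly once per slot, and updating
// an existing sysvar never changes capitalization.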
#[test]
fn test_bank_update_sysvar_account() {
use sysvar::clock::Clock;
let dummy_clock_id = solana_sdk::pubkey::new_rand();
let dummy_rent_epoch = 44;
let (mut genesis_config, _mint_keypair) = create_genesis_config(500);
let expected_previous_slot = 3;
let mut expected_next_slot = expected_previous_slot + 1;
// First, initialize the clock sysvar
activate_all_features(&mut genesis_config);
let bank1 = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank1.calculate_capitalization(true), bank1.capitalization());
assert_capitalization_diff(
&bank1,
|| {
bank1.update_sysvar_account(&dummy_clock_id, |optional_account| {
assert!(optional_account.is_none());
let mut account = create_account(
&Clock {
slot: expected_previous_slot,
..Clock::default()
},
bank1.inherit_specially_retained_account_fields(optional_account),
);
account.set_rent_epoch(dummy_rent_epoch);
account
});
let current_account = bank1.get_account(&dummy_clock_id).unwrap();
assert_eq!(
expected_previous_slot,
from_account::<Clock, _>(&current_account).unwrap().slot
);
assert_eq!(dummy_rent_epoch, current_account.rent_epoch());
},
|old, new| {
assert_eq!(
old + min_rent_excempt_balance_for_sysvars(&bank1, &[sysvar::clock::id()]),
new
);
},
);
assert_capitalization_diff(
&bank1,
|| {
bank1.update_sysvar_account(&dummy_clock_id, |optional_account| {
assert!(optional_account.is_some());
create_account(
&Clock {
slot: expected_previous_slot,
..Clock::default()
},
bank1.inherit_specially_retained_account_fields(optional_account),
)
})
},
|old, new| {
// creating new sysvar twice in a slot shouldn't increment capitalization twice
assert_eq!(old, new);
},
);
// Updating should increment the clock's slot
let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 1));
assert_capitalization_diff(
&bank2,
|| {
bank2.update_sysvar_account(&dummy_clock_id, |optional_account| {
let slot = from_account::<Clock, _>(optional_account.as_ref().unwrap())
.unwrap()
.slot
+ 1;
create_account(
&Clock {
slot,
..Clock::default()
},
bank2.inherit_specially_retained_account_fields(optional_account),
)
});
let current_account = bank2.get_account(&dummy_clock_id).unwrap();
assert_eq!(
expected_next_slot,
from_account::<Clock, _>(&current_account).unwrap().slot
);
// inherit_specially_retained_account_fields() now starts to inherit rent_epoch too
// with rent_for_sysvars
assert_eq!(dummy_rent_epoch, current_account.rent_epoch());
},
|old, new| {
// if existing, capitalization shouldn't change
assert_eq!(old, new);
},
);
// Updating again should give bank2's sysvar to the closure not bank1's.
// Thus, increment expected_next_slot accordingly
expected_next_slot += 1;
assert_capitalization_diff(
&bank2,
|| {
bank2.update_sysvar_account(&dummy_clock_id, |optional_account| {
let slot = from_account::<Clock, _>(optional_account.as_ref().unwrap())
.unwrap()
.slot
+ 1;
create_account(
&Clock {
slot,
..Clock::default()
},
bank2.inherit_specially_retained_account_fields(optional_account),
)
});
let current_account = bank2.get_account(&dummy_clock_id).unwrap();
assert_eq!(
expected_next_slot,
from_account::<Clock, _>(&current_account).unwrap().slot
);
},
|old, new| {
// updating twice in a slot shouldn't increment capitalization twice
assert_eq!(old, new);
},
);
}
#[test]
fn test_bank_epoch_vote_accounts() {
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let mut genesis_config =
create_genesis_config_with_leader(5, &leader_pubkey, leader_lamports).genesis_config;
// set this up weird, forces future generation, odd mod(), etc.
// this says: "vote_accounts for epoch X should be generated at slot index 3 in epoch X-2..."
const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH as u64;
const LEADER_SCHEDULE_SLOT_OFFSET: u64 = SLOTS_PER_EPOCH * 3 - 3;
// no warmup allows me to do the normal division stuff below
genesis_config.epoch_schedule =
EpochSchedule::custom(SLOTS_PER_EPOCH, LEADER_SCHEDULE_SLOT_OFFSET, false);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let mut leader_vote_stake: Vec<_> = parent
.epoch_vote_accounts(0)
.map(|accounts| {
accounts
.iter()
.filter_map(|(pubkey, (stake, account))| {
if let Ok(vote_state) = account.vote_state().as_ref() {
if vote_state.node_pubkey == leader_pubkey {
Some((*pubkey, *stake))
} else {
None
}
} else {
None
}
})
.collect()
})
.unwrap();
assert_eq!(leader_vote_stake.len(), 1);
let (leader_vote_account, leader_stake) = leader_vote_stake.pop().unwrap();
assert!(leader_stake > 0);
let leader_stake = Stake {
delegation: Delegation {
stake: leader_lamports,
activation_epoch: std::u64::MAX, // bootstrap
..Delegation::default()
},
..Stake::default()
};
let mut epoch = 1;
loop {
if epoch > LEADER_SCHEDULE_SLOT_OFFSET / SLOTS_PER_EPOCH {
break;
}
let vote_accounts = parent.epoch_vote_accounts(epoch);
assert!(vote_accounts.is_some());
// epoch_stakes are a snapshot at the leader_schedule_slot_offset boundary
// in the prior epoch (0 in this case)
assert_eq!(
leader_stake.stake(0, None),
vote_accounts.unwrap().get(&leader_vote_account).unwrap().0
);
epoch += 1;
}
// child crosses epoch boundary and is the first slot in the epoch
let child = Bank::new_from_parent(
&parent,
&leader_pubkey,
SLOTS_PER_EPOCH - (LEADER_SCHEDULE_SLOT_OFFSET % SLOTS_PER_EPOCH),
);
assert!(child.epoch_vote_accounts(epoch).is_some());
assert_eq!(
leader_stake.stake(child.epoch(), None),
child
.epoch_vote_accounts(epoch)
.unwrap()
.get(&leader_vote_account)
.unwrap()
.0
);
// child crosses epoch boundary but isn't the first slot in the epoch, still
// makes an epoch stakes snapshot at 1
let child = Bank::new_from_parent(
&parent,
&leader_pubkey,
SLOTS_PER_EPOCH - (LEADER_SCHEDULE_SLOT_OFFSET % SLOTS_PER_EPOCH) + 1,
);
assert!(child.epoch_vote_accounts(epoch).is_some());
assert_eq!(
leader_stake.stake(child.epoch(), None),
child
.epoch_vote_accounts(epoch)
.unwrap()
.get(&leader_vote_account)
.unwrap()
.0
);
}
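// A message whose fee payer is not marked as a signer yields a transaction
// with zero signatures, which must fail sanitization rather than execute
// for free.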
#[test]
fn test_zero_signatures() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
bank.fee_rate_governor.lamports_per_signature = 2;
let key = Keypair::new();
let mut transfer_instruction =
system_instruction::transfer(&mint_keypair.pubkey(), &key.pubkey(), 0);
transfer_instruction.accounts[0].is_signer = false;
let message = Message::new(&[transfer_instruction], None);
let tx = Transaction::new(&[&Keypair::new(); 0], message, bank.last_blockhash());
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::SanitizeFailure)
);
assert_eq!(bank.get_balance(&key.pubkey()), 0);
}
#[test]
fn test_bank_get_slots_in_epoch() {
let (genesis_config, _) = create_genesis_config(500);
let bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.get_slots_in_epoch(0), MINIMUM_SLOTS_PER_EPOCH as u64);
assert_eq!(
bank.get_slots_in_epoch(2),
(MINIMUM_SLOTS_PER_EPOCH * 4) as u64
);
assert_eq!(
bank.get_slots_in_epoch(5000),
genesis_config.epoch_schedule.slots_per_epoch
);
}
#[test]
fn test_is_delta_true() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = Keypair::new();
let tx_transfer_mint_to_1 =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
assert_eq!(bank.process_transaction(&tx_transfer_mint_to_1), Ok(()));
assert!(bank.is_delta.load(Relaxed));
let bank1 = new_from_parent(&bank);
let hash1 = bank1.hash_internal_state();
assert!(!bank1.is_delta.load(Relaxed));
assert_ne!(hash1, bank.hash());
// ticks don't make a bank into a delta or change its state unless a block boundary is crossed
bank1.register_tick(&Hash::default());
assert!(!bank1.is_delta.load(Relaxed));
assert_eq!(bank1.hash_internal_state(), hash1);
}
#[test]
fn test_is_empty() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = Keypair::new();
// The zeroth bank is empty because there are no transactions
assert!(bank0.is_empty());
// Set is_delta to true, bank is no longer empty
let tx_transfer_mint_to_1 =
system_transaction::transfer(&mint_keypair, &key1.pubkey(), 1, genesis_config.hash());
assert_eq!(bank0.process_transaction(&tx_transfer_mint_to_1), Ok(()));
assert!(!bank0.is_empty());
}
#[test]
fn test_bank_inherit_tx_count() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
// Bank 1
let bank1 = Arc::new(Bank::new_from_parent(
&bank0,
&solana_sdk::pubkey::new_rand(),
1,
));
// Bank 2
let bank2 = Bank::new_from_parent(&bank0, &solana_sdk::pubkey::new_rand(), 2);
// transfer a token
assert_eq!(
bank1.process_transaction(&system_transaction::transfer(
&mint_keypair,
&Keypair::new().pubkey(),
1,
genesis_config.hash(),
)),
Ok(())
);
assert_eq!(bank0.transaction_count(), 0);
assert_eq!(bank2.transaction_count(), 0);
assert_eq!(bank1.transaction_count(), 1);
bank1.squash();
assert_eq!(bank0.transaction_count(), 0);
assert_eq!(bank2.transaction_count(), 0);
assert_eq!(bank1.transaction_count(), 1);
let bank6 = Bank::new_from_parent(&bank1, &solana_sdk::pubkey::new_rand(), 3);
assert_eq!(bank1.transaction_count(), 1);
assert_eq!(bank6.transaction_count(), 1);
bank6.squash();
assert_eq!(bank6.transaction_count(), 1);
}
#[test]
fn test_bank_inherit_fee_rate_governor() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(500);
genesis_config
.fee_rate_governor
.target_lamports_per_signature = 123;
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let bank1 = Arc::new(new_from_parent(&bank0));
assert_eq!(
bank0.fee_rate_governor.target_lamports_per_signature / 2,
bank1
.fee_rate_governor
.create_fee_calculator()
.lamports_per_signature
);
}
#[test]
fn test_bank_vote_accounts() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 1);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let vote_accounts = bank.vote_accounts();
assert_eq!(vote_accounts.len(), 1); // bootstrap validator has
// to have a vote account
let vote_keypair = Keypair::new();
let instructions = vote_instruction::create_account(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
&VoteInit {
node_pubkey: mint_keypair.pubkey(),
authorized_voter: vote_keypair.pubkey(),
authorized_withdrawer: vote_keypair.pubkey(),
commission: 0,
},
10,
);
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let transaction = Transaction::new(
&[&mint_keypair, &vote_keypair],
message,
bank.last_blockhash(),
);
bank.process_transaction(&transaction).unwrap();
let vote_accounts = bank.vote_accounts();
assert_eq!(vote_accounts.len(), 2);
assert!(vote_accounts.get(&vote_keypair.pubkey()).is_some());
assert!(bank.withdraw(&vote_keypair.pubkey(), 10).is_ok());
let vote_accounts = bank.vote_accounts();
assert_eq!(vote_accounts.len(), 1);
}
#[test]
fn test_bank_cloned_stake_delegations() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 1);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let stake_delegations = bank.cloned_stake_delegations();
assert_eq!(stake_delegations.len(), 1); // bootstrap validator has
// to have a stake delegation
let vote_keypair = Keypair::new();
let mut instructions = vote_instruction::create_account(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
&VoteInit {
node_pubkey: mint_keypair.pubkey(),
authorized_voter: vote_keypair.pubkey(),
authorized_withdrawer: vote_keypair.pubkey(),
commission: 0,
},
10,
);
let stake_keypair = Keypair::new();
instructions.extend(stake_instruction::create_account_and_delegate_stake(
&mint_keypair.pubkey(),
&stake_keypair.pubkey(),
&vote_keypair.pubkey(),
&Authorized::auto(&stake_keypair.pubkey()),
&Lockup::default(),
10,
));
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let transaction = Transaction::new(
&[&mint_keypair, &vote_keypair, &stake_keypair],
message,
bank.last_blockhash(),
);
bank.process_transaction(&transaction).unwrap();
let stake_delegations = bank.cloned_stake_delegations();
assert_eq!(stake_delegations.len(), 2);
assert!(stake_delegations.get(&stake_keypair.pubkey()).is_some());
}
#[allow(deprecated)]
#[test]
fn test_bank_fees_account() {
let (mut genesis_config, _) = create_genesis_config(500);
genesis_config.fee_rate_governor = FeeRateGovernor::new(12345, 0);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let fees_account = bank.get_account(&sysvar::fees::id()).unwrap();
let fees = from_account::<Fees, _>(&fees_account).unwrap();
assert_eq!(
bank.fee_rate_governor.lamports_per_signature,
fees.fee_calculator.lamports_per_signature
);
assert_eq!(fees.fee_calculator.lamports_per_signature, 12345);
}
#[test]
fn test_is_delta_with_no_committables() {
let (genesis_config, mint_keypair) = create_genesis_config(8000);
let bank = Bank::new_for_tests(&genesis_config);
bank.is_delta.store(false, Relaxed);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let fail_tx =
system_transaction::transfer(&keypair1, &keypair2.pubkey(), 1, bank.last_blockhash());
// Should fail with TransactionError::AccountNotFound, which means
// the account which this tx operated on will not be committed. Thus
// the bank is_delta should still be false
assert_eq!(
bank.process_transaction(&fail_tx),
Err(TransactionError::AccountNotFound)
);
// Check the bank is_delta is still false
assert!(!bank.is_delta.load(Relaxed));
// Should fail with InstructionError, but InstructionErrors are committable,
// so is_delta should be true
assert_eq!(
bank.transfer(10_001, &mint_keypair, &solana_sdk::pubkey::new_rand()),
Err(TransactionError::InstructionError(
0,
SystemError::ResultWithNegativeLamports.into(),
))
);
assert!(bank.is_delta.load(Relaxed));
}
#[test]
fn test_bank_get_program_accounts() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
parent.restore_old_behavior_for_fragile_tests();
let genesis_accounts: Vec<_> = parent.get_all_accounts_with_modified_slots().unwrap();
assert!(
genesis_accounts
.iter()
.any(|(pubkey, _, _)| *pubkey == mint_keypair.pubkey()),
"mint pubkey not found"
);
assert!(
genesis_accounts
.iter()
.any(|(pubkey, _, _)| solana_sdk::sysvar::is_sysvar_id(pubkey)),
"no sysvars found"
);
let bank0 = Arc::new(new_from_parent(&parent));
let pubkey0 = solana_sdk::pubkey::new_rand();
let program_id = Pubkey::new(&[2; 32]);
let account0 = AccountSharedData::new(1, 0, &program_id);
bank0.store_account(&pubkey0, &account0);
assert_eq!(
bank0.get_program_accounts_modified_since_parent(&program_id),
vec![(pubkey0, account0.clone())]
);
let bank1 = Arc::new(new_from_parent(&bank0));
bank1.squash();
assert_eq!(
bank0
.get_program_accounts(&program_id, &ScanConfig::default())
.unwrap(),
vec![(pubkey0, account0.clone())]
);
assert_eq!(
bank1
.get_program_accounts(&program_id, &ScanConfig::default())
.unwrap(),
vec![(pubkey0, account0)]
);
assert_eq!(
bank1.get_program_accounts_modified_since_parent(&program_id),
vec![]
);
let bank2 = Arc::new(new_from_parent(&bank1));
let pubkey1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(3, 0, &program_id);
bank2.store_account(&pubkey1, &account1);
// Accounts with 0 lamports should be filtered out by Accounts::load_by_program()
let pubkey2 = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(0, 0, &program_id);
bank2.store_account(&pubkey2, &account2);
let bank3 = Arc::new(new_from_parent(&bank2));
bank3.squash();
assert_eq!(
bank1
.get_program_accounts(&program_id, &ScanConfig::default())
.unwrap()
.len(),
2
);
assert_eq!(
bank3
.get_program_accounts(&program_id, &ScanConfig::default())
.unwrap()
.len(),
2
);
}
#[test]
fn test_get_filtered_indexed_accounts_limit_exceeded() {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let mut account_indexes = AccountSecondaryIndexes::default();
account_indexes.indexes.insert(AccountIndex::ProgramId);
let bank = Arc::new(Bank::new_with_config(
&genesis_config,
account_indexes,
false,
AccountShrinkThreshold::default(),
));
let address = Pubkey::new_unique();
let program_id = Pubkey::new_unique();
let limit = 100;
let account = AccountSharedData::new(1, limit, &program_id);
bank.store_account(&address, &account);
assert!(bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(program_id),
|_| true,
&ScanConfig::default(),
Some(limit), // limit here will be exceeded, resulting in aborted scan
)
.is_err());
}
#[test]
fn test_get_filtered_indexed_accounts() {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let mut account_indexes = AccountSecondaryIndexes::default();
account_indexes.indexes.insert(AccountIndex::ProgramId);
let bank = Arc::new(Bank::new_with_config(
&genesis_config,
account_indexes,
false,
AccountShrinkThreshold::default(),
));
let address = Pubkey::new_unique();
let program_id = Pubkey::new_unique();
let account = AccountSharedData::new(1, 0, &program_id);
bank.store_account(&address, &account);
let indexed_accounts = bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(program_id),
|_| true,
&ScanConfig::default(),
None,
)
.unwrap();
assert_eq!(indexed_accounts.len(), 1);
assert_eq!(indexed_accounts[0], (address, account));
// Even though the account is re-stored in the bank (and the index) under a new program id,
// it is still present in the index under the original program id as well. This
// demonstrates the need for a redundant post-processing filter.
let another_program_id = Pubkey::new_unique();
let new_account = AccountSharedData::new(1, 0, &another_program_id);
let bank = Arc::new(new_from_parent(&bank));
bank.store_account(&address, &new_account);
let indexed_accounts = bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(program_id),
|_| true,
&ScanConfig::default(),
None,
)
.unwrap();
assert_eq!(indexed_accounts.len(), 1);
assert_eq!(indexed_accounts[0], (address, new_account.clone()));
let indexed_accounts = bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(another_program_id),
|_| true,
&ScanConfig::default(),
None,
)
.unwrap();
assert_eq!(indexed_accounts.len(), 1);
assert_eq!(indexed_accounts[0], (address, new_account.clone()));
// Post-processing filter
let indexed_accounts = bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(program_id),
|account| account.owner() == &program_id,
&ScanConfig::default(),
None,
)
.unwrap();
assert!(indexed_accounts.is_empty());
let indexed_accounts = bank
.get_filtered_indexed_accounts(
&IndexKey::ProgramId(another_program_id),
|account| account.owner() == &another_program_id,
&ScanConfig::default(),
None,
)
.unwrap();
assert_eq!(indexed_accounts.len(), 1);
assert_eq!(indexed_accounts[0], (address, new_account));
}
#[test]
fn test_status_cache_ancestors() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let bank1 = Arc::new(new_from_parent(&parent));
let mut bank = bank1;
for _ in 0..MAX_CACHE_ENTRIES * 2 {
bank = Arc::new(new_from_parent(&bank));
bank.squash();
}
let bank = new_from_parent(&bank);
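// Only the most recent MAX_CACHE_ENTRIES rooted slots, plus this working
// bank's own slot, should remain visible as status-cache ancestors.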
assert_eq!(
bank.status_cache_ancestors(),
(bank.slot() - MAX_CACHE_ENTRIES as u64..=bank.slot()).collect::<Vec<_>>()
);
}
#[test]
fn test_add_builtin() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_vote_program_id() -> Pubkey {
Pubkey::new(&[42u8; 32])
}
fn mock_vote_processor(
_first_instruction_account: usize,
_instruction_data: &[u8],
invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
let program_id = invoke_context.get_caller()?;
if mock_vote_program_id() != *program_id {
return Err(InstructionError::IncorrectProgramId);
}
Err(InstructionError::Custom(42))
}
assert!(bank.get_account(&mock_vote_program_id()).is_none());
bank.add_builtin(
"mock_vote_program",
&mock_vote_program_id(),
mock_vote_processor,
);
assert!(bank.get_account(&mock_vote_program_id()).is_some());
let mock_account = Keypair::new();
let mock_validator_identity = Keypair::new();
let mut instructions = vote_instruction::create_account(
&mint_keypair.pubkey(),
&mock_account.pubkey(),
&VoteInit {
node_pubkey: mock_validator_identity.pubkey(),
..VoteInit::default()
},
1,
);
instructions[1].program_id = mock_vote_program_id();
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let transaction = Transaction::new(
&[&mint_keypair, &mock_account, &mock_validator_identity],
message,
bank.last_blockhash(),
);
assert_eq!(
bank.process_transaction(&transaction),
Err(TransactionError::InstructionError(
1,
InstructionError::Custom(42)
))
);
}
#[test]
fn test_add_duplicate_static_program() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_vote_processor(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
Err(InstructionError::Custom(42))
}
let mock_account = Keypair::new();
let mock_validator_identity = Keypair::new();
let instructions = vote_instruction::create_account(
&mint_keypair.pubkey(),
&mock_account.pubkey(),
&VoteInit {
node_pubkey: mock_validator_identity.pubkey(),
..VoteInit::default()
},
1,
);
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let transaction = Transaction::new(
&[&mint_keypair, &mock_account, &mock_validator_identity],
message,
bank.last_blockhash(),
);
let vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap();
bank.add_builtin(
"solana_vote_program",
&solana_vote_program::id(),
mock_vote_processor,
);
let new_vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap();
// Vote loader account should not be updated since it was included in the genesis config.
assert_eq!(vote_loader_account.data(), new_vote_loader_account.data());
assert_eq!(
bank.process_transaction(&transaction),
Err(TransactionError::InstructionError(
1,
InstructionError::Custom(42)
))
);
}
#[test]
fn test_add_instruction_processor_for_existing_unrelated_accounts() {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_ix_processor(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
Err(InstructionError::Custom(42))
}
// Non-builtin loader accounts cannot be used for instruction processing
{
let stakes = bank.stakes_cache.stakes();
assert!(stakes.vote_accounts().as_ref().is_empty());
}
assert!(bank.stakes_cache.stakes().stake_delegations().is_empty());
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
let ((vote_id, vote_account), (stake_id, stake_account)) =
crate::stakes::tests::create_staked_node_accounts(10_000);
bank.capitalization
.fetch_add(vote_account.lamports() + stake_account.lamports(), Relaxed);
bank.store_account(&vote_id, &vote_account);
bank.store_account(&stake_id, &stake_account);
{
let stakes = bank.stakes_cache.stakes();
assert!(!stakes.vote_accounts().as_ref().is_empty());
}
assert!(!bank.stakes_cache.stakes().stake_delegations().is_empty());
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
bank.add_builtin("mock_program1", &vote_id, mock_ix_processor);
bank.add_builtin("mock_program2", &stake_id, mock_ix_processor);
{
let stakes = bank.stakes_cache.stakes();
assert!(stakes.vote_accounts().as_ref().is_empty());
}
assert!(bank.stakes_cache.stakes().stake_delegations().is_empty());
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
assert_eq!(
"mock_program1",
String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data())
);
assert_eq!(
"mock_program2",
String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data())
);
// Re-adding builtin programs should be no-op
bank.update_accounts_hash();
let old_hash = bank.get_accounts_hash();
bank.add_builtin("mock_program1", &vote_id, mock_ix_processor);
bank.add_builtin("mock_program2", &stake_id, mock_ix_processor);
bank.update_accounts_hash();
let new_hash = bank.get_accounts_hash();
assert_eq!(old_hash, new_hash);
{
let stakes = bank.stakes_cache.stakes();
assert!(stakes.vote_accounts().as_ref().is_empty());
}
assert!(bank.stakes_cache.stakes().stake_delegations().is_empty());
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
assert_eq!(
"mock_program1",
String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data())
);
assert_eq!(
"mock_program2",
String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data())
);
}
#[allow(deprecated)]
#[test]
fn test_recent_blockhashes_sysvar() {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
for i in 1..5 {
let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap();
let recent_blockhashes =
from_account::<sysvar::recent_blockhashes::RecentBlockhashes, _>(&bhq_account)
.unwrap();
// Check length
assert_eq!(recent_blockhashes.len(), i);
let most_recent_hash = recent_blockhashes.iter().next().unwrap().blockhash;
// Check order
assert_eq!(Some(true), bank.check_hash_age(&most_recent_hash, 0));
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
}
#[allow(deprecated)]
#[test]
fn test_blockhash_queue_sysvar_consistency() {
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap();
let recent_blockhashes =
from_account::<sysvar::recent_blockhashes::RecentBlockhashes, _>(&bhq_account).unwrap();
let sysvar_recent_blockhash = recent_blockhashes[0].blockhash;
let bank_last_blockhash = bank.last_blockhash();
assert_eq!(sysvar_recent_blockhash, bank_last_blockhash);
}
#[test]
fn test_hash_internal_state_unchanged() {
let (genesis_config, _) = create_genesis_config(500);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
bank0.freeze();
let bank0_hash = bank0.hash();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank1.freeze();
let bank1_hash = bank1.hash();
// Checkpointing should always result in a new state
assert_ne!(bank0_hash, bank1_hash);
}
#[test]
fn test_ticks_change_state() {
let (genesis_config, _) = create_genesis_config(500);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let bank1 = new_from_parent(&bank);
let hash1 = bank1.hash_internal_state();
// ticks don't change its state unless a block boundary is crossed
for _ in 0..genesis_config.ticks_per_slot {
assert_eq!(bank1.hash_internal_state(), hash1);
bank1.register_tick(&Hash::default());
}
assert_ne!(bank1.hash_internal_state(), hash1);
}
#[ignore]
#[test]
fn test_banks_leak() {
fn add_lotsa_stake_accounts(genesis_config: &mut GenesisConfig) {
const LOTSA: usize = 4_096;
(0..LOTSA).for_each(|_| {
let pubkey = solana_sdk::pubkey::new_rand();
genesis_config.add_account(
pubkey,
stake_state::create_lockup_stake_account(
&Authorized::auto(&pubkey),
&Lockup::default(),
&Rent::default(),
50_000_000,
),
);
});
}
solana_logger::setup();
let (mut genesis_config, _) = create_genesis_config(100_000_000_000_000);
add_lotsa_stake_accounts(&mut genesis_config);
let mut bank = std::sync::Arc::new(Bank::new_for_tests(&genesis_config));
let mut num_banks = 0;
let pid = std::process::id();
#[cfg(not(target_os = "linux"))]
error!(
"\nYou can run this to watch RAM:\n while read -p 'banks: '; do echo $(( $(ps -o vsize= -p {})/$REPLY));done", pid
);
loop {
num_banks += 1;
bank = std::sync::Arc::new(new_from_parent(&bank));
if num_banks % 100 == 0 {
#[cfg(target_os = "linux")]
{
let pages_consumed = std::fs::read_to_string(format!("/proc/{}/statm", pid))
.unwrap()
.split_whitespace()
.next()
.unwrap()
.parse::<usize>()
.unwrap();
error!(
"at {} banks: {} mem or {}kB/bank",
num_banks,
pages_consumed * 4096,
(pages_consumed * 4) / num_banks
);
}
#[cfg(not(target_os = "linux"))]
{
error!("{} banks, sleeping for 5 sec", num_banks);
std::thread::sleep(Duration::new(5, 0));
}
}
}
}
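/// Reads the durable-nonce account and returns the blockhash stored in its
/// Initialized state, or None if the account is missing or uninitialized.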
fn get_nonce_blockhash(bank: &Bank, nonce_pubkey: &Pubkey) -> Option<Hash> {
bank.get_account(nonce_pubkey).and_then(|acc| {
let state =
StateMut::<nonce::state::Versions>::state(&acc).map(|v| v.convert_to_current());
match state {
Ok(nonce::State::Initialized(ref data)) => Some(data.blockhash),
_ => None,
}
})
}
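/// Funds a custodian from the mint, then has the custodian create and fund a
/// nonce account (optionally with a separate authority), all in one transaction.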
fn nonce_setup(
bank: &mut Arc<Bank>,
mint_keypair: &Keypair,
custodian_lamports: u64,
nonce_lamports: u64,
nonce_authority: Option<Pubkey>,
) -> Result<(Keypair, Keypair)> {
let custodian_keypair = Keypair::new();
let nonce_keypair = Keypair::new();
/* Setup accounts */
let mut setup_ixs = vec![system_instruction::transfer(
&mint_keypair.pubkey(),
&custodian_keypair.pubkey(),
custodian_lamports,
)];
let nonce_authority = nonce_authority.unwrap_or_else(|| nonce_keypair.pubkey());
setup_ixs.extend_from_slice(&system_instruction::create_nonce_account(
&custodian_keypair.pubkey(),
&nonce_keypair.pubkey(),
&nonce_authority,
nonce_lamports,
));
let message = Message::new(&setup_ixs, Some(&mint_keypair.pubkey()));
let setup_tx = Transaction::new(
&[mint_keypair, &custodian_keypair, &nonce_keypair],
message,
bank.last_blockhash(),
);
bank.process_transaction(&setup_tx)?;
Ok((custodian_keypair, nonce_keypair))
}
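/// Builds a bank advanced two slots past genesis (banks 0 and 1 charge no
/// fees) and sets up custodian and nonce accounts via `nonce_setup`,
/// returning the bank along with all of the keypairs involved.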
fn setup_nonce_with_bank<F>(
supply_lamports: u64,
mut genesis_cfg_fn: F,
custodian_lamports: u64,
nonce_lamports: u64,
nonce_authority: Option<Pubkey>,
) -> Result<(Arc<Bank>, Keypair, Keypair, Keypair)>
where
F: FnMut(&mut GenesisConfig),
{
let (mut genesis_config, mint_keypair) = create_genesis_config(supply_lamports);
genesis_config.rent.lamports_per_byte_year = 0;
genesis_cfg_fn(&mut genesis_config);
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
// Banks 0 and 1 have no fees; wait two blocks before
// initializing our nonce accounts
for _ in 0..2 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
let (custodian_keypair, nonce_keypair) = nonce_setup(
&mut bank,
&mint_keypair,
custodian_lamports,
nonce_lamports,
nonce_authority,
)?;
Ok((bank, mint_keypair, custodian_keypair, nonce_keypair))
}
#[test]
fn test_check_transaction_for_nonce_ok() {
let (bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
let tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
let nonce_account = bank.get_account(&nonce_pubkey).unwrap();
assert_eq!(
bank.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx)),
Some((nonce_pubkey, nonce_account))
);
}
#[test]
fn test_check_transaction_for_nonce_not_nonce_fail() {
let (bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
let tx = Transaction::new_signed_with_payer(
&[
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000),
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
assert!(bank
.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx))
.is_none());
}
#[test]
fn test_check_transaction_for_nonce_missing_ix_pubkey_fail() {
let (bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
let mut tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
tx.message.instructions[0].accounts.clear();
assert!(bank
.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx))
.is_none());
}
#[test]
fn test_check_transaction_for_nonce_nonce_acc_does_not_exist_fail() {
let (bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let missing_keypair = Keypair::new();
let missing_pubkey = missing_keypair.pubkey();
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
let tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&missing_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
assert!(bank
.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx))
.is_none());
}
#[test]
fn test_check_transaction_for_nonce_bad_tx_hash_fail() {
let (bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
Hash::default(),
);
assert!(bank
.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx))
.is_none());
}
#[test]
fn test_assign_from_nonce_account_fail() {
let (genesis_config, _mint_keypair) = create_genesis_config(100_000_000);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let nonce = Keypair::new();
let nonce_account = AccountSharedData::new_data(
42_424_242,
&nonce::state::Versions::new_current(nonce::State::Initialized(
nonce::state::Data::default(),
)),
&system_program::id(),
)
.unwrap();
let blockhash = bank.last_blockhash();
bank.store_account(&nonce.pubkey(), &nonce_account);
let ix = system_instruction::assign(&nonce.pubkey(), &Pubkey::new(&[9u8; 32]));
let message = Message::new(&[ix], Some(&nonce.pubkey()));
let tx = Transaction::new(&[&nonce], message, blockhash);
let expect = Err(TransactionError::InstructionError(
0,
InstructionError::ModifiedProgramId,
));
assert_eq!(bank.process_transaction(&tx), expect);
}
#[test]
fn test_nonce_transaction() {
let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
assert_eq!(bank.get_balance(&custodian_pubkey), 4_750_000);
assert_eq!(bank.get_balance(&nonce_pubkey), 250_000);
/* Grab the hash stored in the nonce account */
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
/* Kick nonce hash off the blockhash_queue */
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
/* Expect a non-Nonce transfer to fail */
assert_eq!(
bank.process_transaction(&system_transaction::transfer(
&custodian_keypair,
&alice_pubkey,
100_000,
nonce_hash
)),
Err(TransactionError::BlockhashNotFound),
);
/* Check fee not charged */
assert_eq!(bank.get_balance(&custodian_pubkey), 4_750_000);
/* Nonce transfer */
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &alice_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
assert_eq!(bank.process_transaction(&nonce_tx), Ok(()));
/* Check balances */
let mut recent_message = nonce_tx.message;
recent_message.recent_blockhash = bank.last_blockhash();
let mut expected_balance = 4_650_000
- bank
.get_fee_for_message(&recent_message.try_into().unwrap())
.unwrap();
assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance);
assert_eq!(bank.get_balance(&nonce_pubkey), 250_000);
assert_eq!(bank.get_balance(&alice_pubkey), 100_000);
/* Confirm stored nonce has advanced */
let new_nonce = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
assert_ne!(nonce_hash, new_nonce);
/* Nonce re-use fails */
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &alice_pubkey, 100_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
assert_eq!(
bank.process_transaction(&nonce_tx),
Err(TransactionError::BlockhashNotFound)
);
/* Check fee not charged and nonce not advanced */
assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance);
assert_eq!(
new_nonce,
get_nonce_blockhash(&bank, &nonce_pubkey).unwrap()
);
let nonce_hash = new_nonce;
/* Kick nonce hash off the blockhash_queue */
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &alice_pubkey, 100_000_000),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
assert_eq!(
bank.process_transaction(&nonce_tx),
Err(TransactionError::InstructionError(
1,
system_instruction::SystemError::ResultWithNegativeLamports.into(),
))
);
/* Check fee charged and nonce has advanced */
let mut recent_message = nonce_tx.message.clone();
recent_message.recent_blockhash = bank.last_blockhash();
expected_balance -= bank
.get_fee_for_message(&SanitizedMessage::try_from(recent_message).unwrap())
.unwrap();
assert_eq!(bank.get_balance(&custodian_pubkey), expected_balance);
assert_ne!(
nonce_hash,
get_nonce_blockhash(&bank, &nonce_pubkey).unwrap()
);
/* Confirm replaying a TX that failed with InstructionError::* now
* fails with TransactionError::BlockhashNotFound
*/
assert_eq!(
bank.process_transaction(&nonce_tx),
Err(TransactionError::BlockhashNotFound),
);
}
#[test]
fn test_nonce_authority() {
solana_logger::setup();
let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let bad_nonce_authority_keypair = Keypair::new();
let bad_nonce_authority = bad_nonce_authority_keypair.pubkey();
let custodian_account = bank.get_account(&custodian_pubkey).unwrap();
debug!("alice: {}", alice_pubkey);
debug!("custodian: {}", custodian_pubkey);
debug!("nonce: {}", nonce_pubkey);
debug!("nonce account: {:?}", bank.get_account(&nonce_pubkey));
debug!("cust: {:?}", custodian_account);
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &bad_nonce_authority),
system_instruction::transfer(&custodian_pubkey, &alice_pubkey, 42),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &bad_nonce_authority_keypair],
nonce_hash,
);
debug!("{:?}", nonce_tx);
let initial_custodian_balance = custodian_account.lamports();
assert_eq!(
bank.process_transaction(&nonce_tx),
Err(TransactionError::InstructionError(
0,
InstructionError::MissingRequiredSignature,
))
);
/* Check fee charged and nonce has *not* advanced */
let mut recent_message = nonce_tx.message;
recent_message.recent_blockhash = bank.last_blockhash();
assert_eq!(
bank.get_balance(&custodian_pubkey),
initial_custodian_balance
- bank
.get_fee_for_message(&recent_message.try_into().unwrap())
.unwrap()
);
assert_ne!(
nonce_hash,
get_nonce_blockhash(&bank, &nonce_pubkey).unwrap()
);
}
#[test]
fn test_nonce_payer() {
solana_logger::setup();
let nonce_starting_balance = 250_000;
let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, nonce_starting_balance, None)
.unwrap();
let alice_keypair = Keypair::new();
let alice_pubkey = alice_keypair.pubkey();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
debug!("alice: {}", alice_pubkey);
debug!("custodian: {}", custodian_pubkey);
debug!("nonce: {}", nonce_pubkey);
debug!("nonce account: {:?}", bank.get_account(&nonce_pubkey));
debug!("cust: {:?}", bank.get_account(&custodian_pubkey));
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
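// Use the nonce account itself as the fee payer. The transfer below fails
// with insufficient funds, but the fee must still be charged to the nonce
// account and the stored nonce must advance.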
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(&custodian_pubkey, &alice_pubkey, 100_000_000),
],
Some(&nonce_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
debug!("{:?}", nonce_tx);
assert_eq!(
bank.process_transaction(&nonce_tx),
Err(TransactionError::InstructionError(
1,
system_instruction::SystemError::ResultWithNegativeLamports.into(),
))
);
/* Check fee charged and nonce has advanced */
let mut recent_message = nonce_tx.message;
recent_message.recent_blockhash = bank.last_blockhash();
assert_eq!(
bank.get_balance(&nonce_pubkey),
nonce_starting_balance
- bank
.get_fee_for_message(&recent_message.try_into().unwrap())
.unwrap()
);
assert_ne!(
nonce_hash,
get_nonce_blockhash(&bank, &nonce_pubkey).unwrap()
);
}
#[test]
fn test_nonce_fee_calculator_updates() {
let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000);
genesis_config.rent.lamports_per_byte_year = 0;
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
// Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees
let (custodian_keypair, nonce_keypair) =
nonce_setup(&mut bank, &mint_keypair, 500_000, 100_000, None).unwrap();
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
// Grab the hash and fee_calculator stored in the nonce account
let (stored_nonce_hash, stored_fee_calculator) = bank
.get_account(&nonce_pubkey)
.and_then(|acc| {
let state =
StateMut::<nonce::state::Versions>::state(&acc).map(|v| v.convert_to_current());
match state {
Ok(nonce::State::Initialized(ref data)) => {
Some((data.blockhash, data.fee_calculator.clone()))
}
_ => None,
}
})
.unwrap();
// Kick nonce hash off the blockhash_queue
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
// Nonce transfer
let nonce_tx = Transaction::new_signed_with_payer(
&[
system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
system_instruction::transfer(
&custodian_pubkey,
&solana_sdk::pubkey::new_rand(),
100_000,
),
],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
stored_nonce_hash,
);
bank.process_transaction(&nonce_tx).unwrap();
// Grab the new hash and fee_calculator; both should be updated
let (nonce_hash, fee_calculator) = bank
.get_account(&nonce_pubkey)
.and_then(|acc| {
let state =
StateMut::<nonce::state::Versions>::state(&acc).map(|v| v.convert_to_current());
match state {
Ok(nonce::State::Initialized(ref data)) => {
Some((data.blockhash, data.fee_calculator.clone()))
}
_ => None,
}
})
.unwrap();
assert_ne!(stored_nonce_hash, nonce_hash);
assert_ne!(stored_fee_calculator, fee_calculator);
}
#[test]
fn test_check_ro_durable_nonce_fails() {
let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) =
setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None).unwrap();
Arc::get_mut(&mut bank)
.unwrap()
.activate_feature(&feature_set::nonce_must_be_writable::id());
let custodian_pubkey = custodian_keypair.pubkey();
let nonce_pubkey = nonce_keypair.pubkey();
let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
let account_metas = vec![
AccountMeta::new_readonly(nonce_pubkey, false),
#[allow(deprecated)]
AccountMeta::new_readonly(sysvar::recent_blockhashes::id(), false),
AccountMeta::new_readonly(nonce_pubkey, true),
];
let nonce_instruction = Instruction::new_with_bincode(
system_program::id(),
&system_instruction::SystemInstruction::AdvanceNonceAccount,
account_metas,
);
let tx = Transaction::new_signed_with_payer(
&[nonce_instruction],
Some(&custodian_pubkey),
&[&custodian_keypair, &nonce_keypair],
nonce_hash,
);
// Caught by the system program because the tx hash is valid
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InstructionError(
0,
InstructionError::InvalidArgument
))
);
// Kick nonce hash off the blockhash_queue
for _ in 0..MAX_RECENT_BLOCKHASHES + 1 {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
bank = Arc::new(new_from_parent(&bank));
}
// Caught by the runtime because it is a nonce transaction
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::BlockhashNotFound)
);
assert_eq!(
bank.check_transaction_for_nonce(&SanitizedTransaction::from_transaction_for_tests(tx)),
None
);
}
#[test]
fn test_collect_balances() {
let (genesis_config, _mint_keypair) = create_genesis_config(500);
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let bank0 = Arc::new(new_from_parent(&parent));
let keypair = Keypair::new();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let program_id = Pubkey::new(&[2; 32]);
let keypair_account = AccountSharedData::new(8, 0, &program_id);
let account0 = AccountSharedData::new(11, 0, &program_id);
let program_account = AccountSharedData::new(1, 10, &Pubkey::default());
bank0.store_account(&keypair.pubkey(), &keypair_account);
bank0.store_account(&pubkey0, &account0);
bank0.store_account(&program_id, &program_account);
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx0 = Transaction::new_with_compiled_instructions(
&[&keypair],
&[pubkey0],
Hash::default(),
vec![program_id],
instructions,
);
let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
let tx1 = Transaction::new_with_compiled_instructions(
&[&keypair],
&[pubkey1],
Hash::default(),
vec![program_id],
instructions,
);
let txs = vec![tx0, tx1];
let batch = bank0.prepare_batch_for_tests(txs.clone());
let balances = bank0.collect_balances(&batch);
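// Each balance vector mirrors the transaction's account keys, in order:
// [payer keypair, destination pubkey, program account].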
assert_eq!(balances.len(), 2);
assert_eq!(balances[0], vec![8, 11, 1]);
assert_eq!(balances[1], vec![8, 0, 1]);
let txs: Vec<_> = txs.into_iter().rev().collect();
let batch = bank0.prepare_batch_for_tests(txs);
let balances = bank0.collect_balances(&batch);
assert_eq!(balances.len(), 2);
assert_eq!(balances[0], vec![8, 0, 1]);
assert_eq!(balances[1], vec![8, 11, 1]);
}
#[test]
fn test_pre_post_transaction_balances() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(500);
let fee_rate_governor = FeeRateGovernor::new(1, 0);
genesis_config.fee_rate_governor = fee_rate_governor;
let parent = Arc::new(Bank::new_for_tests(&genesis_config));
let bank0 = Arc::new(new_from_parent(&parent));
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let keypair0_account = AccountSharedData::new(8, 0, &Pubkey::default());
let keypair1_account = AccountSharedData::new(9, 0, &Pubkey::default());
let account0 = AccountSharedData::new(11, 0, &Pubkey::default());
bank0.store_account(&keypair0.pubkey(), &keypair0_account);
bank0.store_account(&keypair1.pubkey(), &keypair1_account);
bank0.store_account(&pubkey0, &account0);
let blockhash = bank0.last_blockhash();
let tx0 = system_transaction::transfer(&keypair0, &pubkey0, 2, blockhash);
let tx1 = system_transaction::transfer(&Keypair::new(), &pubkey1, 2, blockhash);
let tx2 = system_transaction::transfer(&keypair1, &pubkey2, 12, blockhash);
let txs = vec![tx0, tx1, tx2];
let lock_result = bank0.prepare_batch_for_tests(txs);
let (transaction_results, transaction_balances_set, inner_instructions, transaction_logs) =
bank0.load_execute_and_commit_transactions(
&lock_result,
MAX_PROCESSING_AGE,
true,
false,
false,
&mut ExecuteTimings::default(),
);
assert!(inner_instructions.iter().all(Option::is_none));
assert!(transaction_logs.iter().all(Option::is_none));
assert_eq!(inner_instructions.len(), 3);
assert_eq!(transaction_logs.len(), 3);
assert_eq!(transaction_balances_set.pre_balances.len(), 3);
assert_eq!(transaction_balances_set.post_balances.len(), 3);
assert!(transaction_results.execution_results[0].0.is_ok());
assert_eq!(transaction_balances_set.pre_balances[0], vec![8, 11, 1]);
assert_eq!(transaction_balances_set.post_balances[0], vec![5, 13, 1]);
// Failed transactions still produce balance sets
// This is a TransactionError - not possible to charge fees
assert!(transaction_results.execution_results[1].0.is_err());
assert_eq!(transaction_balances_set.pre_balances[1], vec![0, 0, 1]);
assert_eq!(transaction_balances_set.post_balances[1], vec![0, 0, 1]);
// Failed transactions still produce balance sets
// This is an InstructionError - fees charged
assert!(transaction_results.execution_results[2].0.is_err());
assert_eq!(transaction_balances_set.pre_balances[2], vec![9, 0, 1]);
assert_eq!(transaction_balances_set.post_balances[2], vec![8, 0, 1]);
}
#[test]
fn test_transaction_with_duplicate_accounts_in_instruction() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_process_instruction(
first_instruction_account: usize,
data: &[u8],
invoke_context: &mut InvokeContext,
) -> result::Result<(), InstructionError> {
let keyed_accounts = invoke_context.get_keyed_accounts()?;
let lamports = data[0] as u64;
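// Accounts at index 0 (`from`) and index 2 (`dup`) refer to the same pubkey,
// so both debits below land on the same underlying account: the net effect
// of one instruction is `from -= 2 * lamports`, `to += 2 * lamports`.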
keyed_account_at_index(keyed_accounts, first_instruction_account + 2)?
.try_account_ref_mut()?
.checked_sub_lamports(lamports)?;
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?
.try_account_ref_mut()?
.checked_add_lamports(lamports)?;
keyed_account_at_index(keyed_accounts, first_instruction_account)?
.try_account_ref_mut()?
.checked_sub_lamports(lamports)?;
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?
.try_account_ref_mut()?
.checked_add_lamports(lamports)?;
Ok(())
}
let mock_program_id = Pubkey::new(&[2u8; 32]);
bank.add_builtin("mock_program", &mock_program_id, mock_process_instruction);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let dup_pubkey = from_pubkey;
let from_account = AccountSharedData::new(100, 1, &mock_program_id);
let to_account = AccountSharedData::new(0, 1, &mock_program_id);
bank.store_account(&from_pubkey, &from_account);
bank.store_account(&to_pubkey, &to_account);
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
AccountMeta::new(dup_pubkey, false),
];
let instruction = Instruction::new_with_bincode(mock_program_id, &10, account_metas);
let tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
let result = bank.process_transaction(&tx);
assert_eq!(result, Ok(()));
assert_eq!(bank.get_balance(&from_pubkey), 80);
assert_eq!(bank.get_balance(&to_pubkey), 20);
}
#[test]
fn test_transaction_with_program_ids_passed_to_programs() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
#[allow(clippy::unnecessary_wraps)]
fn mock_process_instruction(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> result::Result<(), InstructionError> {
Ok(())
}
let mock_program_id = Pubkey::new(&[2u8; 32]);
bank.add_builtin("mock_program", &mock_program_id, mock_process_instruction);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let dup_pubkey = from_pubkey;
let from_account = AccountSharedData::new(100, 1, &mock_program_id);
let to_account = AccountSharedData::new(0, 1, &mock_program_id);
bank.store_account(&from_pubkey, &from_account);
bank.store_account(&to_pubkey, &to_account);
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
AccountMeta::new(dup_pubkey, false),
AccountMeta::new(mock_program_id, false),
];
let instruction = Instruction::new_with_bincode(mock_program_id, &10, account_metas);
let tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
let result = bank.process_transaction(&tx);
assert_eq!(result, Ok(()));
}
#[test]
fn test_account_ids_after_program_ids() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
];
let instruction =
Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
let mut tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
tx.message.account_keys.push(solana_sdk::pubkey::new_rand());
bank.add_builtin(
"mock_vote",
&solana_vote_program::id(),
mock_ok_vote_processor,
);
let result = bank.process_transaction(&tx);
assert_eq!(result, Ok(()));
let account = bank.get_account(&solana_vote_program::id()).unwrap();
info!("account: {:?}", account);
assert!(account.executable());
}
#[test]
fn test_incinerator() {
let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
// Move to the first normal slot so normal rent behavior applies
let bank = Bank::new_from_parent(
&bank0,
&Pubkey::default(),
genesis_config.epoch_schedule.first_normal_slot,
);
let pre_capitalization = bank.capitalization();
// Burn a non-rent exempt amount
let burn_amount = bank.get_minimum_balance_for_rent_exemption(0) - 1;
assert_eq!(bank.get_balance(&incinerator::id()), 0);
bank.transfer(burn_amount, &mint_keypair, &incinerator::id())
.unwrap();
assert_eq!(bank.get_balance(&incinerator::id()), burn_amount);
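// Freezing the bank sweeps the incinerator: its balance is burned and
// debited from capitalization rather than collected as rent.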
bank.freeze();
assert_eq!(bank.get_balance(&incinerator::id()), 0);
// Ensure that no rent was collected, and the entire burn amount was removed from bank
// capitalization
assert_eq!(bank.capitalization(), pre_capitalization - burn_amount);
}
#[test]
fn test_duplicate_account_key() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
];
bank.add_builtin(
"mock_vote",
&solana_vote_program::id(),
mock_ok_vote_processor,
);
let instruction =
Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
let mut tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
tx.message.account_keys.push(from_pubkey);
let result = bank.process_transaction(&tx);
assert_eq!(result, Err(TransactionError::AccountLoadedTwice));
}
#[test]
fn test_program_id_as_payer() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
];
bank.add_builtin(
"mock_vote",
&solana_vote_program::id(),
mock_ok_vote_processor,
);
let instruction =
Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
let mut tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
info!(
"mint: {} account keys: {:?}",
mint_keypair.pubkey(),
tx.message.account_keys
);
assert_eq!(tx.message.account_keys.len(), 4);
tx.message.account_keys.clear();
tx.message.account_keys.push(solana_vote_program::id());
tx.message.account_keys.push(mint_keypair.pubkey());
tx.message.account_keys.push(from_pubkey);
tx.message.account_keys.push(to_pubkey);
tx.message.instructions[0].program_id_index = 0;
tx.message.instructions[0].accounts.clear();
tx.message.instructions[0].accounts.push(2);
tx.message.instructions[0].accounts.push(3);
let result = bank.process_transaction(&tx);
assert_eq!(result, Err(TransactionError::SanitizeFailure));
}
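/// Minimal instruction processor that accepts any instruction; used by the
/// account-key shuffling and fuzz tests below.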
#[allow(clippy::unnecessary_wraps)]
fn mock_ok_vote_processor(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
Ok(())
}
#[test]
fn test_ref_account_key_after_program_id() {
let (genesis_config, mint_keypair) = create_genesis_config(500);
let mut bank = Bank::new_for_tests(&genesis_config);
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, false),
AccountMeta::new(to_pubkey, false),
];
bank.add_builtin(
"mock_vote",
&solana_vote_program::id(),
mock_ok_vote_processor,
);
let instruction =
Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas);
let mut tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
tx.message.account_keys.push(solana_sdk::pubkey::new_rand());
assert_eq!(tx.message.account_keys.len(), 5);
tx.message.instructions[0].accounts.remove(0);
tx.message.instructions[0].accounts.push(4);
let result = bank.process_transaction(&tx);
assert_eq!(result, Ok(()));
}
#[test]
fn test_fuzz_instructions() {
solana_logger::setup();
use rand::{thread_rng, Rng};
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
let mut bank = Bank::new_for_tests(&genesis_config);
let max_programs = 5;
let program_keys: Vec<_> = (0..max_programs)
.map(|i| {
let key = solana_sdk::pubkey::new_rand();
let name = format!("program{}", i);
bank.add_builtin(&name, &key, mock_ok_vote_processor);
(key, name.as_bytes().to_vec())
})
.collect();
let max_keys = 100;
let keys: Vec<_> = (0..max_keys)
.map(|_| {
let key = solana_sdk::pubkey::new_rand();
let balance = if thread_rng().gen_ratio(9, 10) {
let lamports = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, 10)
} else {
thread_rng().gen_range(20, 100)
};
let space = thread_rng().gen_range(0, 10);
let owner = Pubkey::default();
let account = AccountSharedData::new(lamports, space, &owner);
bank.store_account(&key, &account);
lamports
} else {
0
};
(key, balance)
})
.collect();
let mut results = HashMap::new();
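// Fuzz 2,000 randomly shaped transactions. Whatever the processing result,
// the invariants checked below must hold: no tracked account balance changes
// (the only registered processors are no-ops), and the builtin program
// accounts remain executable with their name as account data.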
for _ in 0..2_000 {
let num_keys = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, max_keys)
} else {
thread_rng().gen_range(1, 4)
};
let num_instructions = thread_rng().gen_range(0, max_keys - num_keys);
let mut account_keys: Vec<_> = if thread_rng().gen_ratio(1, 5) {
(0..num_keys)
.map(|_| {
let idx = thread_rng().gen_range(0, keys.len());
keys[idx].0
})
.collect()
} else {
let mut inserted = HashSet::new();
(0..num_keys)
.map(|_| {
let mut idx;
loop {
idx = thread_rng().gen_range(0, keys.len());
if !inserted.contains(&idx) {
break;
}
}
inserted.insert(idx);
keys[idx].0
})
.collect()
};
let instructions: Vec<_> = if num_keys > 0 {
(0..num_instructions)
.map(|_| {
let num_accounts_to_pass = thread_rng().gen_range(0, num_keys);
let account_indexes = (0..num_accounts_to_pass)
.map(|_| thread_rng().gen_range(0, num_keys))
.collect();
let program_index: u8 = thread_rng().gen_range(0, num_keys) as u8;
if thread_rng().gen_ratio(4, 5) {
let programs_index = thread_rng().gen_range(0, program_keys.len());
account_keys[program_index as usize] = program_keys[programs_index].0;
}
CompiledInstruction::new(program_index, &10, account_indexes)
})
.collect()
} else {
vec![]
};
let account_keys_len = std::cmp::max(account_keys.len(), 2);
let num_signatures = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len + 10)
} else {
thread_rng().gen_range(1, account_keys_len)
};
let num_required_signatures = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len + 10) as u8
} else {
thread_rng().gen_range(1, std::cmp::max(2, num_signatures)) as u8
};
let num_readonly_signed_accounts = if thread_rng().gen_ratio(1, 5) {
thread_rng().gen_range(0, account_keys_len) as u8
} else {
let max = if num_required_signatures > 1 {
num_required_signatures - 1
} else {
1
};
thread_rng().gen_range(0, max) as u8
};
let num_readonly_unsigned_accounts = if thread_rng().gen_ratio(1, 5)
|| (num_required_signatures as usize) >= account_keys_len
{
thread_rng().gen_range(0, account_keys_len) as u8
} else {
thread_rng().gen_range(0, account_keys_len - num_required_signatures as usize) as u8
};
let header = MessageHeader {
num_required_signatures,
num_readonly_signed_accounts,
num_readonly_unsigned_accounts,
};
let message = Message {
header,
account_keys,
recent_blockhash: bank.last_blockhash(),
instructions,
};
let tx = Transaction {
signatures: vec![Signature::default(); num_signatures],
message,
};
let result = bank.process_transaction(&tx);
for (key, balance) in &keys {
assert_eq!(bank.get_balance(key), *balance);
}
for (key, name) in &program_keys {
let account = bank.get_account(key).unwrap();
assert!(account.executable());
assert_eq!(account.data(), name);
}
info!("result: {:?}", result);
let result_key = format!("{:?}", result);
*results.entry(result_key).or_insert(0) += 1;
}
info!("results: {:?}", results);
}
#[test]
fn test_bank_hash_consistency() {
solana_logger::setup();
let mut genesis_config = GenesisConfig::new(
&[(
Pubkey::new(&[42; 32]),
AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()),
)],
&[],
);
genesis_config.creation_time = 0;
genesis_config.cluster_type = ClusterType::MainnetBeta;
genesis_config.rent.burn_percent = 100;
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
// Check a few slots, cross an epoch boundary
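// These hard-coded hashes pin the bank hash for this genesis config; any
// change to genesis defaults or to the hashing scheme requires updating them.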
assert_eq!(bank.get_slots_in_epoch(0), 32);
loop {
goto_end_of_slot(Arc::get_mut(&mut bank).unwrap());
if bank.slot == 0 {
assert_eq!(
bank.hash().to_string(),
"DqaWg7EVKzb5Fpe92zNBtXAWqLwcedgHDicYrCBnf3QK"
);
}
if bank.slot == 32 {
assert_eq!(
bank.hash().to_string(),
"AYdhzhKrM74r9XuZBDGcHeFzg2DEtp1boggnEnzDjZSq"
);
}
if bank.slot == 64 {
assert_eq!(
bank.hash().to_string(),
"EsbPVYzo1qz5reEUH5okKW4ExB6WbcidkVdW5mzpFn7C"
);
}
if bank.slot == 128 {
assert_eq!(
bank.hash().to_string(),
"H3DWrQ6FqbLkFNDxbWQ62UKRbw2dbuxf3oVF2VpBk6Ga"
);
break;
}
bank = Arc::new(new_from_parent(&bank));
}
}
#[test]
fn test_same_program_id_uses_unique_executable_accounts() {
fn nested_processor(
first_instruction_account: usize,
_data: &[u8],
invoke_context: &mut InvokeContext,
) -> result::Result<(), InstructionError> {
let keyed_accounts = invoke_context.get_keyed_accounts()?;
let account = keyed_account_at_index(keyed_accounts, first_instruction_account)?;
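// Both instructions invoke the same program; each invocation must observe
// the original 42 lamports, which only holds if every instruction gets its
// own copy of the executable account (the +1 below is neither carried over
// nor committed, as the balance asserts at the end of the test confirm).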
assert_eq!(42, account.lamports().unwrap());
account.try_account_ref_mut()?.checked_add_lamports(1)?;
Ok(())
}
let (genesis_config, mint_keypair) = create_genesis_config(50000);
let mut bank = Bank::new_for_tests(&genesis_config);
// Add a new program
let program1_pubkey = solana_sdk::pubkey::new_rand();
bank.add_builtin("program", &program1_pubkey, nested_processor);
// Add a new program owned by the first
let program2_pubkey = solana_sdk::pubkey::new_rand();
let mut program2_account = AccountSharedData::new(42, 1, &program1_pubkey);
program2_account.set_executable(true);
bank.store_account(&program2_pubkey, &program2_account);
let instruction = Instruction::new_with_bincode(program2_pubkey, &10, vec![]);
let tx = Transaction::new_signed_with_payer(
&[instruction.clone(), instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
assert!(bank.process_transaction(&tx).is_ok());
assert_eq!(1, bank.get_balance(&program1_pubkey));
assert_eq!(42, bank.get_balance(&program2_pubkey));
}
fn get_shrink_account_size() -> usize {
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
// Set root for bank 0, with caching disabled so we can get the size
// of the storage for this slot
let mut bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
));
bank0.restore_old_behavior_for_fragile_tests();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
bank0.freeze();
bank0.squash();
let sizes = bank0
.rc
.accounts
.scan_slot(0, |stored_account| Some(stored_account.stored_size()));
// Create an account that takes more than DEFAULT_ACCOUNTS_SHRINK_RATIO of the total
// account space for the slot, so that when it gets pruned, the storage entry becomes
// a shrink candidate.
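// A quick check of the algebra: with s = pubkey0_size and t = bank0_total_size,
// choosing s = t / (1 - r) for r = DEFAULT_ACCOUNTS_SHRINK_RATIO gives
//   s / (s + t) = 1 / (2 - r),
// and 1 / (2 - r) > r for every r in (0, 1), since r * (2 - r) = 1 - (1 - r)^2 < 1.
// The assertion below verifies the same inequality numerically.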
let bank0_total_size: usize = sizes.into_iter().sum();
let pubkey0_size = (bank0_total_size as f64 / (1.0 - DEFAULT_ACCOUNTS_SHRINK_RATIO)).ceil();
assert!(
pubkey0_size / (pubkey0_size + bank0_total_size as f64) > DEFAULT_ACCOUNTS_SHRINK_RATIO
);
pubkey0_size as usize
}
#[test]
fn test_clean_nonrooted() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
let pubkey0 = Pubkey::new(&[0; 32]);
let pubkey1 = Pubkey::new(&[1; 32]);
info!("pubkey0: {}", pubkey0);
info!("pubkey1: {}", pubkey1);
// Set root for bank 0, with caching enabled
let mut bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
AccountSecondaryIndexes::default(),
true,
AccountShrinkThreshold::default(),
));
let account_zero = AccountSharedData::new(0, 0, &Pubkey::new_unique());
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
bank0.freeze();
bank0.squash();
// Flush now so that accounts cache cleaning doesn't clean up bank 0 when later
// slots add updates to the cache
bank0.force_flush_accounts_cache();
// Store some lamports in bank 1
let some_lamports = 123;
let mut bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
bank1.deposit(&pubkey0, some_lamports).unwrap();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank1).unwrap());
bank1.freeze();
bank1.flush_accounts_cache_slot();
bank1.print_accounts_stats();
// Store some lamports for pubkey1 in bank 2, root bank 2
// bank2's parent is bank0
let mut bank2 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 2));
bank2.deposit(&pubkey1, some_lamports).unwrap();
bank2.store_account(&pubkey0, &account_zero);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank2).unwrap());
bank2.freeze();
bank2.squash();
bank2.force_flush_accounts_cache();
bank2.print_accounts_stats();
drop(bank1);
// Clean accounts, which should add earlier slots to the shrink
// candidate set
bank2.clean_accounts(false, false, None);
let mut bank3 = Arc::new(Bank::new_from_parent(&bank2, &Pubkey::default(), 3));
bank3.deposit(&pubkey1, some_lamports + 1).unwrap();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank3).unwrap());
bank3.freeze();
bank3.squash();
bank3.force_flush_accounts_cache();
bank3.clean_accounts(false, false, None);
assert_eq!(
bank3.rc.accounts.accounts_db.ref_count_for_pubkey(&pubkey0),
2
);
assert!(bank3
.rc
.accounts
.accounts_db
.storage
.get_slot_stores(1)
.is_none());
bank3.print_accounts_stats();
}
#[test]
fn test_shrink_candidate_slots_cached() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
// Set root for bank 0, with caching enabled
let mut bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
AccountSecondaryIndexes::default(),
true,
AccountShrinkThreshold::default(),
));
bank0.restore_old_behavior_for_fragile_tests();
let pubkey0_size = get_shrink_account_size();
let account0 = AccountSharedData::new(1000, pubkey0_size as usize, &Pubkey::new_unique());
bank0.store_account(&pubkey0, &account0);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank0).unwrap());
bank0.freeze();
bank0.squash();
// Flush now so that accounts cache cleaning doesn't clean up bank 0 when later
// slots add updates to the cache
bank0.force_flush_accounts_cache();
// Store some lamports in bank 1
let some_lamports = 123;
let mut bank1 = Arc::new(new_from_parent(&bank0));
bank1.deposit(&pubkey1, some_lamports).unwrap();
bank1.deposit(&pubkey2, some_lamports).unwrap();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank1).unwrap());
bank1.freeze();
bank1.squash();
// Flush now so that accounts cache cleaning doesn't clean up bank 0 when later
// slots add updates to the cache
bank1.force_flush_accounts_cache();
// Store some lamports for pubkey1 in bank 2, root bank 2
let mut bank2 = Arc::new(new_from_parent(&bank1));
bank2.deposit(&pubkey1, some_lamports).unwrap();
bank2.store_account(&pubkey0, &account0);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank2).unwrap());
bank2.freeze();
bank2.squash();
bank2.force_flush_accounts_cache();
// Clean accounts, which should add earlier slots to the shrink
// candidate set
bank2.clean_accounts(false, false, None);
// Slots 0 and 1 should be candidates for shrinking, but slot 2
// shouldn't because none of its accounts are outdated by a later
// root
assert_eq!(bank2.shrink_candidate_slots(), 2);
let alive_counts: Vec<usize> = (0..3)
.map(|slot| {
bank2
.rc
.accounts
.accounts_db
.alive_account_count_in_slot(slot)
})
.collect();
// No more slots should be shrunk
assert_eq!(bank2.shrink_candidate_slots(), 0);
// alive_counts represents the count of alive accounts in the three slots 0,1,2
assert_eq!(alive_counts, vec![10, 1, 7]);
}
#[test]
fn test_process_stale_slot_with_budget() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000_000);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
bank.restore_old_behavior_for_fragile_tests();
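// A hedged reading of the assertions below (not a spec): the first argument is
// budget already consumed, the second is how much budget is recovered per call,
// and the return value is the consumed budget carried into the next call.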
assert_eq!(bank.process_stale_slot_with_budget(0, 0), 0);
assert_eq!(bank.process_stale_slot_with_budget(133, 0), 133);
assert_eq!(bank.process_stale_slot_with_budget(0, 100), 0);
assert_eq!(bank.process_stale_slot_with_budget(33, 100), 0);
assert_eq!(bank.process_stale_slot_with_budget(133, 100), 33);
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank).unwrap());
bank.squash();
let some_lamports = 123;
let mut bank = Arc::new(new_from_parent(&bank));
bank.deposit(&pubkey1, some_lamports).unwrap();
bank.deposit(&pubkey2, some_lamports).unwrap();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank).unwrap());
let mut bank = Arc::new(new_from_parent(&bank));
bank.deposit(&pubkey1, some_lamports).unwrap();
goto_end_of_slot(Arc::<Bank>::get_mut(&mut bank).unwrap());
bank.squash();
bank.clean_accounts(false, false, None);
let force_to_return_alive_account = 0;
assert_eq!(
bank.process_stale_slot_with_budget(22, force_to_return_alive_account),
22
);
let consumed_budgets: usize = (0..3)
.map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account))
.sum();
// consumed_budgets represents the count of alive accounts in the three slots 0,1,2
assert_eq!(consumed_budgets, 11);
}
#[test]
fn test_add_builtin_no_overwrite() {
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
#[allow(clippy::unnecessary_wraps)]
fn mock_ix_processor(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
Ok(())
}
let slot = 123;
let program_id = solana_sdk::pubkey::new_rand();
let mut bank = Arc::new(Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
));
assert_eq!(bank.get_account_modified_slot(&program_id), None);
Arc::get_mut(&mut bank).unwrap().add_builtin(
"mock_program",
&program_id,
mock_ix_processor,
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
let mut bank = Arc::new(new_from_parent(&bank));
Arc::get_mut(&mut bank).unwrap().add_builtin(
"mock_program",
&program_id,
mock_ix_processor,
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
Arc::get_mut(&mut bank).unwrap().replace_builtin(
"mock_program v2",
&program_id,
mock_ix_processor,
);
assert_eq!(
bank.get_account_modified_slot(&program_id).unwrap().1,
bank.slot()
);
}
#[test]
fn test_add_builtin_loader_no_overwrite() {
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
#[allow(clippy::unnecessary_wraps)]
fn mock_ix_processor(
_first_instruction_account: usize,
_data: &[u8],
_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
Ok(())
}
let slot = 123;
let loader_id = solana_sdk::pubkey::new_rand();
let mut bank = Arc::new(Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
));
assert_eq!(bank.get_account_modified_slot(&loader_id), None);
Arc::get_mut(&mut bank)
.unwrap()
.add_builtin("mock_program", &loader_id, mock_ix_processor);
assert_eq!(bank.get_account_modified_slot(&loader_id).unwrap().1, slot);
let mut bank = Arc::new(new_from_parent(&bank));
Arc::get_mut(&mut bank)
.unwrap()
.add_builtin("mock_program", &loader_id, mock_ix_processor);
assert_eq!(bank.get_account_modified_slot(&loader_id).unwrap().1, slot);
}
#[test]
fn test_add_builtin_account() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(100_000);
activate_all_features(&mut genesis_config);
let slot = 123;
let program_id = solana_sdk::pubkey::new_rand();
let bank = Arc::new(Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
));
assert_eq!(bank.get_account_modified_slot(&program_id), None);
assert_capitalization_diff(
&bank,
|| bank.add_builtin_account("mock_program", &program_id, false),
|old, new| {
assert_eq!(old + 1, new);
},
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
let bank = Arc::new(new_from_parent(&bank));
assert_capitalization_diff(
&bank,
|| bank.add_builtin_account("mock_program", &program_id, false),
|old, new| assert_eq!(old, new),
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
let bank = Arc::new(new_from_parent(&bank));
// When replacing a builtin program, the name must change to disambiguate it from
// repeated invocations.
assert_capitalization_diff(
&bank,
|| bank.add_builtin_account("mock_program v2", &program_id, true),
|old, new| assert_eq!(old, new),
);
assert_eq!(
bank.get_account_modified_slot(&program_id).unwrap().1,
bank.slot()
);
let bank = Arc::new(new_from_parent(&bank));
assert_capitalization_diff(
&bank,
|| bank.add_builtin_account("mock_program v2", &program_id, true),
|old, new| assert_eq!(old, new),
);
// replacing with same name shouldn't update account
assert_eq!(
bank.get_account_modified_slot(&program_id).unwrap().1,
bank.parent_slot()
);
}
#[test]
fn test_add_builtin_account_inherited_cap_while_replacing() {
let (genesis_config, mint_keypair) = create_genesis_config(100_000);
let bank = Bank::new_for_tests(&genesis_config);
let program_id = solana_sdk::pubkey::new_rand();
bank.add_builtin_account("mock_program", &program_id, false);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
// someone messes with program_id's balance
bank.withdraw(&mint_keypair.pubkey(), 10).unwrap();
assert_ne!(bank.capitalization(), bank.calculate_capitalization(true));
bank.deposit(&program_id, 10).unwrap();
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
bank.add_builtin_account("mock_program v2", &program_id, true);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
}
#[test]
fn test_add_builtin_account_squatted_while_not_replacing() {
let (genesis_config, mint_keypair) = create_genesis_config(100_000);
let bank = Bank::new_for_tests(&genesis_config);
let program_id = solana_sdk::pubkey::new_rand();
// someone managed to squat at program_id!
bank.withdraw(&mint_keypair.pubkey(), 10).unwrap();
assert_ne!(bank.capitalization(), bank.calculate_capitalization(true));
bank.deposit(&program_id, 10).unwrap();
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
bank.add_builtin_account("mock_program", &program_id, false);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
}
#[test]
#[should_panic(
expected = "Can't change frozen bank by adding not-existing new builtin \
program (mock_program, CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). \
Maybe, inconsistent program activation is detected on snapshot restore?"
)]
fn test_add_builtin_account_after_frozen() {
use std::str::FromStr;
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let slot = 123;
let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
let bank = Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
);
bank.freeze();
bank.add_builtin_account("mock_program", &program_id, false);
}
#[test]
#[should_panic(
expected = "There is no account to replace with builtin program (mock_program, \
CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre)."
)]
fn test_add_builtin_account_replace_none() {
use std::str::FromStr;
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let slot = 123;
let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
let bank = Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
);
bank.add_builtin_account("mock_program", &program_id, true);
}
#[test]
fn test_add_precompiled_account() {
let (mut genesis_config, _mint_keypair) = create_genesis_config(100_000);
activate_all_features(&mut genesis_config);
let slot = 123;
let program_id = solana_sdk::pubkey::new_rand();
let bank = Arc::new(Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
));
assert_eq!(bank.get_account_modified_slot(&program_id), None);
assert_capitalization_diff(
&bank,
|| bank.add_precompiled_account(&program_id),
|old, new| {
assert_eq!(old + 1, new);
},
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
let bank = Arc::new(new_from_parent(&bank));
assert_capitalization_diff(
&bank,
|| bank.add_precompiled_account(&program_id),
|old, new| assert_eq!(old, new),
);
assert_eq!(bank.get_account_modified_slot(&program_id).unwrap().1, slot);
}
#[test]
fn test_add_precompiled_account_inherited_cap_while_replacing() {
let (genesis_config, mint_keypair) = create_genesis_config(100_000);
let bank = Bank::new_for_tests(&genesis_config);
let program_id = solana_sdk::pubkey::new_rand();
bank.add_precompiled_account(&program_id);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
// someone messes with program_id's balance
bank.withdraw(&mint_keypair.pubkey(), 10).unwrap();
assert_ne!(bank.capitalization(), bank.calculate_capitalization(true));
bank.deposit(&program_id, 10).unwrap();
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
bank.add_precompiled_account(&program_id);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
}
#[test]
fn test_add_precompiled_account_squatted_while_not_replacing() {
let (genesis_config, mint_keypair) = create_genesis_config(100_000);
let bank = Bank::new_for_tests(&genesis_config);
let program_id = solana_sdk::pubkey::new_rand();
// someone managed to squat at program_id!
bank.withdraw(&mint_keypair.pubkey(), 10).unwrap();
assert_ne!(bank.capitalization(), bank.calculate_capitalization(true));
bank.deposit(&program_id, 10).unwrap();
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
bank.add_precompiled_account(&program_id);
assert_eq!(bank.capitalization(), bank.calculate_capitalization(true));
}
#[test]
#[should_panic(
expected = "Can't change frozen bank by adding not-existing new precompiled \
program (CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). \
Maybe, inconsistent program activation is detected on snapshot restore?"
)]
fn test_add_precompiled_account_after_frozen() {
use std::str::FromStr;
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let slot = 123;
let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap();
let bank = Bank::new_from_parent(
&Arc::new(Bank::new_for_tests(&genesis_config)),
&Pubkey::default(),
slot,
);
bank.freeze();
bank.add_precompiled_account(&program_id);
}
#[test]
fn test_reconfigure_token2_native_mint() {
solana_logger::setup();
let mut genesis_config =
create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config;
// ClusterType::Development - Native mint exists immediately
assert_eq!(genesis_config.cluster_type, ClusterType::Development);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(
bank.get_balance(&inline_spl_token::native_mint::id()),
1000000000
);
// Testnet - Native mint blinks into existence at epoch 93
genesis_config.cluster_type = ClusterType::Testnet;
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.get_balance(&inline_spl_token::native_mint::id()), 0);
bank.deposit(&inline_spl_token::native_mint::id(), 4200000000)
.unwrap();
let bank = Bank::new_from_parent(
&bank,
&Pubkey::default(),
genesis_config.epoch_schedule.get_first_slot_in_epoch(93),
);
let native_mint_account = bank
.get_account(&inline_spl_token::native_mint::id())
.unwrap();
assert_eq!(native_mint_account.data().len(), 82);
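// 82 bytes is the packed size of an SPL Token Mint account, so the account was
// initialized as a mint.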
assert_eq!(
bank.get_balance(&inline_spl_token::native_mint::id()),
4200000000
);
assert_eq!(native_mint_account.owner(), &inline_spl_token::id());
// MainnetBeta - Native mint blinks into existence at epoch 75
genesis_config.cluster_type = ClusterType::MainnetBeta;
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
assert_eq!(bank.get_balance(&inline_spl_token::native_mint::id()), 0);
bank.deposit(&inline_spl_token::native_mint::id(), 4200000000)
.unwrap();
let bank = Bank::new_from_parent(
&bank,
&Pubkey::default(),
genesis_config.epoch_schedule.get_first_slot_in_epoch(75),
);
let native_mint_account = bank
.get_account(&inline_spl_token::native_mint::id())
.unwrap();
assert_eq!(native_mint_account.data().len(), 82);
assert_eq!(
bank.get_balance(&inline_spl_token::native_mint::id()),
4200000000
);
assert_eq!(native_mint_account.owner(), &inline_spl_token::id());
}
#[test]
fn test_ensure_no_storage_rewards_pool() {
solana_logger::setup();
let mut genesis_config =
create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config;
// Testnet - Storage rewards pool is purged at epoch 93
// Also this is with bad capitalization
genesis_config.cluster_type = ClusterType::Testnet;
genesis_config.inflation = Inflation::default();
let reward_pubkey = solana_sdk::pubkey::new_rand();
genesis_config.rewards_pools.insert(
reward_pubkey,
Account::new(u64::MAX, 0, &solana_sdk::pubkey::new_rand()),
);
let bank0 = Bank::new_for_tests(&genesis_config);
// because capitalization has been reset with a bogus capitalization calculation
// allowing overflows, deliberately subtract 1 lamport to simulate it
bank0.capitalization.fetch_sub(1, Relaxed);
let bank0 = Arc::new(bank0);
assert_eq!(bank0.get_balance(&reward_pubkey), u64::MAX,);
let bank1 = Bank::new_from_parent(
&bank0,
&Pubkey::default(),
genesis_config.epoch_schedule.get_first_slot_in_epoch(93),
);
// assert that everything is back in order...
assert!(bank1.get_account(&reward_pubkey).is_none());
let sysvar_and_builtin_program_delta = 1;
assert_eq!(
bank0.capitalization() + 1 + 1_000_000_000 + sysvar_and_builtin_program_delta,
bank1.capitalization()
);
assert_eq!(bank1.capitalization(), bank1.calculate_capitalization(true));
// Depending on RUSTFLAGS, this test may or may not expose Rust's checked-math behavior,
// so do some convoluted setup; anyway, this test itself is only temporary
let bank0 = std::panic::AssertUnwindSafe(bank0);
let overflowing_capitalization =
std::panic::catch_unwind(|| bank0.calculate_capitalization(true));
if let Ok(overflowing_capitalization) = overflowing_capitalization {
info!("asserting overflowing capitalization for bank0");
assert_eq!(overflowing_capitalization, bank0.capitalization());
} else {
info!("NOT-asserting overflowing capitalization for bank0");
}
}
#[derive(Debug)]
struct TestExecutor {}
impl Executor for TestExecutor {
fn execute<'a, 'b>(
&self,
_first_instruction_account: usize,
_instruction_data: &[u8],
_invoke_context: &'a mut InvokeContext<'b>,
_use_jit: bool,
) -> std::result::Result<(), InstructionError> {
Ok(())
}
}
#[test]
fn test_cached_executors() {
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
let mut cache = CachedExecutors::new(3, 0);
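// Capacity is 3. As the assertions below demonstrate, a put beyond capacity
// evicts the entry with the fewest recorded uses (each get counts as a use):
// key3 is evicted first, then key2.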
cache.put(&key1, executor.clone());
cache.put(&key2, executor.clone());
cache.put(&key3, executor.clone());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key3).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_some());
cache.put(&key4, executor.clone());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key3).is_none());
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key4).is_some());
cache.put(&key3, executor.clone());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key2).is_none());
assert!(cache.get(&key3).is_some());
assert!(cache.get(&key4).is_some());
}
#[test]
fn test_cached_executors_eviction() {
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
let mut cache = CachedExecutors::new(3, 0);
assert!(cache.current_epoch == 0);
cache.put(&key1, executor.clone());
cache.put(&key2, executor.clone());
cache.put(&key3, executor.clone());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
assert!(cache.get(&key1).is_some());
cache = cache.clone_with_epoch(1);
assert!(cache.current_epoch == 1);
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key2).is_some());
assert!(cache.get(&key3).is_some());
cache.put(&key4, executor.clone());
assert!(cache.get(&key4).is_some());
assert!(cache.get(&key3).is_none());
cache = cache.clone_with_epoch(2);
assert!(cache.current_epoch == 2);
cache.put(&key3, executor.clone());
assert!(cache.get(&key3).is_some());
}
#[test]
fn test_bank_executor_cache() {
solana_logger::setup();
let (genesis_config, _) = create_genesis_config(1);
let bank = Bank::new_for_tests(&genesis_config);
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let key3 = solana_sdk::pubkey::new_rand();
let key4 = solana_sdk::pubkey::new_rand();
let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
let message = Message {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
account_keys: vec![key1, key2],
recent_blockhash: Hash::default(),
instructions: vec![],
}
.try_into()
.unwrap();
let program_indices = &[vec![0, 1], vec![2]];
let accounts = &[
(key3, AccountSharedData::default()),
(key4, AccountSharedData::default()),
(key1, AccountSharedData::default()),
];
// don't do any work if not dirty
let mut executors = Executors::default();
executors.insert(key1, executor.clone());
executors.insert(key2, executor.clone());
executors.insert(key3, executor.clone());
executors.insert(key4, executor.clone());
let executors = Rc::new(RefCell::new(executors));
executors.borrow_mut().is_dirty = false;
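// With is_dirty cleared, update_executors should treat the batch as a no-op,
// so the lookup below comes back empty.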
bank.update_executors(executors);
let executors = bank.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 0);
// do work
let mut executors = Executors::default();
executors.insert(key1, executor.clone());
executors.insert(key2, executor.clone());
executors.insert(key3, executor.clone());
executors.insert(key4, executor.clone());
let executors = Rc::new(RefCell::new(executors));
bank.update_executors(executors);
let executors = bank.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 4);
assert!(executors.borrow().executors.contains_key(&key1));
assert!(executors.borrow().executors.contains_key(&key2));
assert!(executors.borrow().executors.contains_key(&key3));
assert!(executors.borrow().executors.contains_key(&key4));
// Check inheritance
let bank = Bank::new_from_parent(&Arc::new(bank), &solana_sdk::pubkey::new_rand(), 1);
let executors = bank.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 4);
assert!(executors.borrow().executors.contains_key(&key1));
assert!(executors.borrow().executors.contains_key(&key2));
assert!(executors.borrow().executors.contains_key(&key3));
assert!(executors.borrow().executors.contains_key(&key4));
bank.remove_executor(&key1);
bank.remove_executor(&key2);
bank.remove_executor(&key3);
bank.remove_executor(&key4);
let executors = bank.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 0);
assert!(!executors.borrow().executors.contains_key(&key1));
assert!(!executors.borrow().executors.contains_key(&key2));
assert!(!executors.borrow().executors.contains_key(&key3));
assert!(!executors.borrow().executors.contains_key(&key4));
}
#[test]
fn test_bank_executor_cow() {
solana_logger::setup();
let (genesis_config, _) = create_genesis_config(1);
let root = Arc::new(Bank::new_for_tests(&genesis_config));
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let executor: Arc<dyn Executor> = Arc::new(TestExecutor {});
let message =
SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap();
let program_indices = &[vec![0, 1]];
let accounts = &[
(key1, AccountSharedData::default()),
(key2, AccountSharedData::default()),
];
// add one to root bank
let mut executors = Executors::default();
executors.insert(key1, executor.clone());
let executors = Rc::new(RefCell::new(executors));
root.update_executors(executors);
let executors = root.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
let fork1 = Bank::new_from_parent(&root, &Pubkey::default(), 1);
let fork2 = Bank::new_from_parent(&root, &Pubkey::default(), 1);
let executors = fork1.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
let executors = fork2.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
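// Updating fork1 below should copy-on-write its executor cache: fork1 then sees
// two entries while fork2 continues to see only the parent's single entry.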
let mut executors = Executors::default();
executors.insert(key2, executor.clone());
let executors = Rc::new(RefCell::new(executors));
fork1.update_executors(executors);
let executors = fork1.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 2);
let executors = fork2.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
fork1.remove_executor(&key1);
let executors = fork1.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
let executors = fork2.get_executors(&message, accounts, program_indices);
assert_eq!(executors.borrow().executors.len(), 1);
}
#[test]
fn test_compute_active_feature_set() {
let (genesis_config, _mint_keypair) = create_genesis_config(100_000);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let mut bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
let test_feature = "TestFeature11111111111111111111111111111111"
.parse::<Pubkey>()
.unwrap();
let mut feature_set = FeatureSet::default();
feature_set.inactive.insert(test_feature);
bank.feature_set = Arc::new(feature_set.clone());
let new_activations = bank.compute_active_feature_set(true);
assert!(new_activations.is_empty());
assert!(!bank.feature_set.is_active(&test_feature));
// Depositing into the `test_feature` account should do nothing
bank.deposit(&test_feature, 42).unwrap();
let new_activations = bank.compute_active_feature_set(true);
assert!(new_activations.is_empty());
assert!(!bank.feature_set.is_active(&test_feature));
// Request `test_feature` activation
let feature = Feature::default();
assert_eq!(feature.activated_at, None);
bank.store_account(&test_feature, &feature::create_account(&feature, 42));
// Run `compute_active_feature_set` disallowing new activations
let new_activations = bank.compute_active_feature_set(false);
assert!(new_activations.is_empty());
assert!(!bank.feature_set.is_active(&test_feature));
let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account"))
.expect("from_account");
assert_eq!(feature.activated_at, None);
// Run `compute_active_feature_set` allowing new activations
let new_activations = bank.compute_active_feature_set(true);
assert_eq!(new_activations.len(), 1);
assert!(bank.feature_set.is_active(&test_feature));
let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account"))
.expect("from_account");
assert_eq!(feature.activated_at, Some(1));
// Reset the bank's feature set
bank.feature_set = Arc::new(feature_set);
assert!(!bank.feature_set.is_active(&test_feature));
// Running `compute_active_feature_set` will not cause new activations, but
// `test_feature` is now active
let new_activations = bank.compute_active_feature_set(true);
assert!(new_activations.is_empty());
assert!(bank.feature_set.is_active(&test_feature));
}
#[test]
fn test_spl_token_replacement() {
let (genesis_config, _mint_keypair) = create_genesis_config(0);
let mut bank = Bank::new_for_tests(&genesis_config);
// Setup original token account
bank.store_account_and_update_capitalization(
&inline_spl_token::id(),
&AccountSharedData::from(Account {
lamports: 100,
..Account::default()
}),
);
assert_eq!(bank.get_balance(&inline_spl_token::id()), 100);
// Setup new token account
let new_token_account = AccountSharedData::from(Account {
lamports: 123,
..Account::default()
});
bank.store_account_and_update_capitalization(
&inline_spl_token::new_token_program::id(),
&new_token_account,
);
assert_eq!(
bank.get_balance(&inline_spl_token::new_token_program::id()),
123
);
let original_capitalization = bank.capitalization();
bank.apply_spl_token_v3_3_0_release();
// New token account is now empty
assert_eq!(
bank.get_balance(&inline_spl_token::new_token_program::id()),
0
);
// Old token account holds the new token account
assert_eq!(
bank.get_account(&inline_spl_token::id()),
Some(new_token_account)
);
// Lamports in the old token account were burnt
assert_eq!(bank.capitalization(), original_capitalization - 100);
}
pub fn update_vote_account_timestamp(
timestamp: BlockTimestamp,
bank: &Bank,
vote_pubkey: &Pubkey,
) {
let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default();
let mut vote_state = VoteState::from(&vote_account).unwrap_or_default();
vote_state.last_timestamp = timestamp;
let versioned = VoteStateVersions::new_current(vote_state);
VoteState::to(&versioned, &mut vote_account).unwrap();
bank.store_account(vote_pubkey, &vote_account);
}
fn min_rent_exempt_balance_for_sysvars(bank: &Bank, sysvar_ids: &[Pubkey]) -> u64 {
sysvar_ids
.iter()
.map(|sysvar_id| {
trace!("min_rent_excempt_balance_for_sysvars: {}", sysvar_id);
bank.get_minimum_balance_for_rent_exemption(
bank.get_account(sysvar_id).unwrap().data().len(),
)
})
.sum()
}
fn expected_cap_delta_after_sysvar_reset(bank: &Bank, sysvar_ids: &[Pubkey]) -> u64 {
min_rent_exempt_balance_for_sysvars(bank, sysvar_ids) - sysvar_ids.len() as u64
}
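// Before the reset each sysvar holds exactly 1 lamport (the tests below assert
// this), so the capitalization delta from resetting them to their rent-exempt
// minimums is the summed minimums less 1 lamport per sysvar.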
#[test]
fn test_adjust_sysvar_balance_for_rent() {
let (genesis_config, _mint_keypair) = create_genesis_config(0);
let bank = Bank::new_for_tests(&genesis_config);
let mut smaller_sample_sysvar = bank.get_account(&sysvar::clock::id()).unwrap();
assert_eq!(smaller_sample_sysvar.lamports(), 1);
bank.adjust_sysvar_balance_for_rent(&mut smaller_sample_sysvar);
assert_eq!(
smaller_sample_sysvar.lamports(),
bank.get_minimum_balance_for_rent_exemption(smaller_sample_sysvar.data().len()),
);
let mut bigger_sample_sysvar = AccountSharedData::new(
1,
smaller_sample_sysvar.data().len() + 1,
&Pubkey::default(),
);
bank.adjust_sysvar_balance_for_rent(&mut bigger_sample_sysvar);
assert!(smaller_sample_sysvar.lamports() < bigger_sample_sysvar.lamports());
// excess lamports shouldn't be reduced by adjust_sysvar_balance_for_rent()
let excess_lamports = smaller_sample_sysvar.lamports() + 999;
smaller_sample_sysvar.set_lamports(excess_lamports);
bank.adjust_sysvar_balance_for_rent(&mut smaller_sample_sysvar);
assert_eq!(smaller_sample_sysvar.lamports(), excess_lamports);
}
// this test can be removed after rent_for_sysvars activation on mainnet-beta
#[test]
fn test_no_deletion_due_to_rent_upon_rent_for_sysvar_activation() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(0);
let feature_balance =
std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
// activate all features but rent_for_sysvars
activate_all_features(&mut genesis_config);
genesis_config
.accounts
.remove(&feature_set::rent_for_sysvars::id());
let bank0 = Bank::new_for_tests(&genesis_config);
let bank1 = Arc::new(new_from_parent(&Arc::new(bank0)));
// schedule activation of rent_for_sysvars
bank1.store_account_and_update_capitalization(
&feature_set::rent_for_sysvars::id(),
&feature::create_account(&Feature { activated_at: None }, feature_balance),
);
let bank2 =
Bank::new_from_parent(&bank1, &Pubkey::default(), bank1.first_slot_in_next_epoch());
assert_eq!(
bank2
.get_program_accounts(&sysvar::id(), &ScanConfig::default(),)
.unwrap()
.len(),
8
);
// force rent collection for sysvars
bank2.collect_rent_in_partition((0, 0, 1)); // all range
// no sysvar should be deleted due to rent
assert_eq!(
bank2
.get_program_accounts(&sysvar::id(), &ScanConfig::default(),)
.unwrap()
.len(),
8
);
}
// this test can be removed after rent_for_sysvars activation on mainnet-beta
#[test]
fn test_rent_for_sysvars_adjustment_minimum_genesis_set() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(0);
let feature_balance =
std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
// inhibit deprecated rewards sysvar creation altogether
genesis_config.accounts.insert(
feature_set::deprecate_rewards_sysvar::id(),
Account::from(feature::create_account(
&Feature {
activated_at: Some(0),
},
feature_balance,
)),
);
let bank0 = Bank::new_for_tests(&genesis_config);
let bank1 = Arc::new(new_from_parent(&Arc::new(bank0)));
// schedule activation of rent_for_sysvars
bank1.store_account_and_update_capitalization(
&feature_set::rent_for_sysvars::id(),
&feature::create_account(&Feature { activated_at: None }, feature_balance),
);
{
let sysvars = bank1
.get_program_accounts(&sysvar::id(), &ScanConfig::default())
.unwrap();
assert_eq!(sysvars.len(), 8);
assert!(sysvars
.iter()
.map(|(_pubkey, account)| account.lamports())
.all(|lamports| lamports == 1));
}
// 8 sysvars should be reset by reset_all_sysvar_balances()
let bank2 = assert_capitalization_diff_with_new_bank(
&bank1,
|| Bank::new_from_parent(&bank1, &Pubkey::default(), bank1.first_slot_in_next_epoch()),
|old, new| {
assert_eq!(
old + expected_cap_delta_after_sysvar_reset(
&bank1,
&[
sysvar::clock::id(),
sysvar::epoch_schedule::id(),
#[allow(deprecated)]
sysvar::fees::id(),
#[allow(deprecated)]
sysvar::recent_blockhashes::id(),
sysvar::rent::id(),
sysvar::slot_hashes::id(),
sysvar::slot_history::id(),
sysvar::stake_history::id(),
]
),
new
)
},
);
{
let sysvars = bank2
.get_program_accounts(&sysvar::id(), &ScanConfig::default())
.unwrap();
assert_eq!(sysvars.len(), 8);
assert!(sysvars
.iter()
.map(|(_pubkey, account)| account.lamports())
.all(|lamports| lamports > 1));
}
}
// this test can be removed after rent_for_sysvars activation on mainnet-beta
#[test]
fn test_rent_for_sysvars_adjustment_full_set() {
solana_logger::setup();
let (mut genesis_config, _mint_keypair) = create_genesis_config(0);
let feature_balance =
std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
// activate all features but rent_for_sysvars
activate_all_features(&mut genesis_config);
genesis_config
.accounts
.remove(&feature_set::rent_for_sysvars::id());
// intentionally allow creation of the deprecated rewards sysvar
genesis_config
.accounts
.remove(&feature_set::deprecate_rewards_sysvar::id());
// intentionally create bogus builtin programs
#[allow(clippy::unnecessary_wraps)]
fn mock_process_instruction(
_first_instruction_account: usize,
_data: &[u8],
_invoke_context: &mut InvokeContext,
) -> std::result::Result<(), solana_sdk::instruction::InstructionError> {
Ok(())
}
let builtins = Builtins {
genesis_builtins: vec![
Builtin::new(
"mock bpf",
solana_sdk::bpf_loader::id(),
mock_process_instruction,
),
Builtin::new(
"mock bpf",
solana_sdk::bpf_loader_deprecated::id(),
mock_process_instruction,
),
],
feature_builtins: vec![],
};
let bank0 = Arc::new(Bank::new_with_paths_for_tests(
&genesis_config,
Vec::new(),
None,
Some(&builtins),
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
false,
));
// move to next epoch to create now deprecated rewards sysvar intentionally
let bank1 = Arc::new(Bank::new_from_parent(
&bank0,
&Pubkey::default(),
bank0.first_slot_in_next_epoch(),
));
// schedule activation of rent_for_sysvars
bank1.store_account_and_update_capitalization(
&feature_set::rent_for_sysvars::id(),
&feature::create_account(&Feature { activated_at: None }, feature_balance),
);
{
let sysvars = bank1
.get_program_accounts(&sysvar::id(), &ScanConfig::default())
.unwrap();
assert_eq!(sysvars.len(), 9);
assert!(sysvars
.iter()
.map(|(_pubkey, account)| account.lamports())
.all(|lamports| lamports == 1));
}
// 9 sysvars should be reset by reset_all_sysvar_balances()
let bank2 = assert_capitalization_diff_with_new_bank(
&bank1,
|| Bank::new_from_parent(&bank1, &Pubkey::default(), bank1.first_slot_in_next_epoch()),
|old, new| {
assert_eq!(
old + expected_cap_delta_after_sysvar_reset(
&bank1,
&[
sysvar::clock::id(),
sysvar::epoch_schedule::id(),
#[allow(deprecated)]
sysvar::fees::id(),
#[allow(deprecated)]
sysvar::recent_blockhashes::id(),
sysvar::rent::id(),
sysvar::rewards::id(),
sysvar::slot_hashes::id(),
sysvar::slot_history::id(),
sysvar::stake_history::id(),
]
),
new
)
},
);
{
let sysvars = bank2
.get_program_accounts(&sysvar::id(), &ScanConfig::default())
.unwrap();
assert_eq!(sysvars.len(), 9);
assert!(sysvars
.iter()
.map(|(_pubkey, account)| account.lamports())
.all(|lamports| lamports > 1));
}
}
#[test]
fn test_update_clock_timestamp() {
let leader_pubkey = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
genesis_config,
voting_keypair,
..
} = create_genesis_config_with_leader(5, &leader_pubkey, 3);
let mut bank = Bank::new_for_tests(&genesis_config);
// Advance past slot 0, which has special handling.
bank = new_from_parent(&Arc::new(bank));
bank = new_from_parent(&Arc::new(bank));
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis()
);
bank.update_clock(None);
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis()
);
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: bank.unix_timestamp_from_genesis() - 1,
},
&bank,
&voting_keypair.pubkey(),
);
bank.update_clock(None);
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis()
);
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: bank.unix_timestamp_from_genesis(),
},
&bank,
&voting_keypair.pubkey(),
);
bank.update_clock(None);
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis()
);
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: bank.unix_timestamp_from_genesis() + 1,
},
&bank,
&voting_keypair.pubkey(),
);
bank.update_clock(None);
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis() + 1
);
// Timestamp cannot go backward from ancestor Bank to child
bank = new_from_parent(&Arc::new(bank));
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: bank.unix_timestamp_from_genesis() - 1,
},
&bank,
&voting_keypair.pubkey(),
);
bank.update_clock(None);
assert_eq!(
bank.clock().unix_timestamp,
bank.unix_timestamp_from_genesis()
);
}
fn poh_estimate_offset(bank: &Bank) -> Duration {
let mut epoch_start_slot = bank.epoch_schedule.get_first_slot_in_epoch(bank.epoch());
if epoch_start_slot == bank.slot() {
epoch_start_slot = bank
.epoch_schedule
.get_first_slot_in_epoch(bank.epoch() - 1);
}
bank.slot().saturating_sub(epoch_start_slot) as u32
* Duration::from_nanos(bank.ns_per_slot as u64)
}
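// For example, assuming the default ~400ms slot time (ns_per_slot of
// 400_000_000), being 10 slots past the epoch start yields an offset of ~4s.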
#[test]
fn test_warp_timestamp_again_feature_slow() {
fn max_allowable_delta_since_epoch(bank: &Bank, max_allowable_drift: u32) -> i64 {
let poh_estimate_offset = poh_estimate_offset(bank);
(poh_estimate_offset.as_secs()
+ (poh_estimate_offset * max_allowable_drift / 100).as_secs()) as i64
}
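// E.g. with a 10s PoH estimate and max_allowable_drift = 50, the bound is
// 10 + 10 * 50 / 100 = 15 seconds past the epoch start timestamp.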
let leader_pubkey = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
mut genesis_config,
voting_keypair,
..
} = create_genesis_config_with_leader(5, &leader_pubkey, 3);
let slots_in_epoch = 32;
genesis_config
.accounts
.remove(&feature_set::warp_timestamp_again::id())
.unwrap();
genesis_config.epoch_schedule = EpochSchedule::new(slots_in_epoch);
let mut bank = Bank::new_for_tests(&genesis_config);
let recent_timestamp: UnixTimestamp = bank.unix_timestamp_from_genesis();
let additional_secs = 8; // Greater than MAX_ALLOWABLE_DRIFT_PERCENTAGE for full epoch
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: recent_timestamp + additional_secs,
},
&bank,
&voting_keypair.pubkey(),
);
// additional_secs greater than MAX_ALLOWABLE_DRIFT_PERCENTAGE for an epoch
// timestamp bounded to 50% deviation
for _ in 0..31 {
bank = new_from_parent(&Arc::new(bank));
assert_eq!(
bank.clock().unix_timestamp,
bank.clock().epoch_start_timestamp
+ max_allowable_delta_since_epoch(&bank, MAX_ALLOWABLE_DRIFT_PERCENTAGE),
);
assert_eq!(bank.clock().epoch_start_timestamp, recent_timestamp);
}
// Request `warp_timestamp_again` activation
let feature = Feature { activated_at: None };
bank.store_account(
&feature_set::warp_timestamp_again::id(),
&feature::create_account(&feature, 42),
);
let previous_epoch_timestamp = bank.clock().epoch_start_timestamp;
let previous_timestamp = bank.clock().unix_timestamp;
// Advance to epoch boundary to activate; time is warped to estimate with no bounding
bank = new_from_parent(&Arc::new(bank));
assert_ne!(bank.clock().epoch_start_timestamp, previous_timestamp);
assert!(
bank.clock().epoch_start_timestamp
> previous_epoch_timestamp
+ max_allowable_delta_since_epoch(&bank, MAX_ALLOWABLE_DRIFT_PERCENTAGE)
);
// Refresh vote timestamp
let recent_timestamp: UnixTimestamp = bank.clock().unix_timestamp;
let additional_secs = 8;
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: recent_timestamp + additional_secs,
},
&bank,
&voting_keypair.pubkey(),
);
// additional_secs greater than MAX_ALLOWABLE_DRIFT_PERCENTAGE for 22 slots
// timestamp bounded to 80% deviation
for _ in 0..23 {
bank = new_from_parent(&Arc::new(bank));
assert_eq!(
bank.clock().unix_timestamp,
bank.clock().epoch_start_timestamp
+ max_allowable_delta_since_epoch(&bank, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW),
);
assert_eq!(bank.clock().epoch_start_timestamp, recent_timestamp);
}
for _ in 0..8 {
bank = new_from_parent(&Arc::new(bank));
assert_eq!(
bank.clock().unix_timestamp,
bank.clock().epoch_start_timestamp
+ poh_estimate_offset(&bank).as_secs() as i64
+ additional_secs,
);
assert_eq!(bank.clock().epoch_start_timestamp, recent_timestamp);
}
}
#[test]
fn test_timestamp_fast() {
fn max_allowable_delta_since_epoch(bank: &Bank, max_allowable_drift: u32) -> i64 {
let poh_estimate_offset = poh_estimate_offset(bank);
(poh_estimate_offset.as_secs()
- (poh_estimate_offset * max_allowable_drift / 100).as_secs()) as i64
}
let leader_pubkey = solana_sdk::pubkey::new_rand();
let GenesisConfigInfo {
mut genesis_config,
voting_keypair,
..
} = create_genesis_config_with_leader(5, &leader_pubkey, 3);
let slots_in_epoch = 32;
genesis_config.epoch_schedule = EpochSchedule::new(slots_in_epoch);
let mut bank = Bank::new_for_tests(&genesis_config);
let recent_timestamp: UnixTimestamp = bank.unix_timestamp_from_genesis();
let additional_secs = 5; // Greater than MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST for full epoch
update_vote_account_timestamp(
BlockTimestamp {
slot: bank.slot(),
timestamp: recent_timestamp - additional_secs,
},
&bank,
&voting_keypair.pubkey(),
);
// additional_secs greater than MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST for an epoch
// timestamp bounded to 25% deviation
for _ in 0..31 {
bank = new_from_parent(&Arc::new(bank));
assert_eq!(
bank.clock().unix_timestamp,
bank.clock().epoch_start_timestamp
+ max_allowable_delta_since_epoch(&bank, MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST),
);
assert_eq!(bank.clock().epoch_start_timestamp, recent_timestamp);
}
}
#[test]
fn test_program_is_native_loader() {
let (genesis_config, mint_keypair) = create_genesis_config(50000);
let bank = Bank::new_for_tests(&genesis_config);
let tx = Transaction::new_signed_with_payer(
&[Instruction::new_with_bincode(
native_loader::id(),
&(),
vec![],
)],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InstructionError(
0,
InstructionError::UnsupportedProgramId
))
);
}
#[test]
fn test_debug_bank() {
let (genesis_config, _mint_keypair) = create_genesis_config(50000);
let mut bank = Bank::new_for_tests(&genesis_config);
bank.finish_init(&genesis_config, None, false);
let debug = format!("{:#?}", bank);
assert!(!debug.is_empty());
}
#[derive(Debug)]
enum AcceptableScanResults {
DroppedSlotError,
NoFailure,
Both,
}
fn test_store_scan_consistency<F: 'static>(
accounts_db_caching_enabled: bool,
update_f: F,
drop_callback: Option<Box<dyn DropCallback + Send + Sync>>,
acceptable_scan_results: AcceptableScanResults,
) where
F: Fn(
Arc<Bank>,
crossbeam_channel::Sender<Arc<Bank>>,
crossbeam_channel::Receiver<BankId>,
Arc<HashSet<Pubkey>>,
Pubkey,
u64,
) + std::marker::Send,
{
solana_logger::setup();
// Set up initial bank
let mut genesis_config = create_genesis_config_with_leader(
10,
&solana_sdk::pubkey::new_rand(),
374_999_998_287_840,
)
.genesis_config;
genesis_config.rent = Rent::free();
let bank0 = Arc::new(Bank::new_with_config(
&genesis_config,
AccountSecondaryIndexes::default(),
accounts_db_caching_enabled,
AccountShrinkThreshold::default(),
));
bank0.set_callback(drop_callback);
// Set up pubkeys to write to
let total_pubkeys = ITER_BATCH_SIZE * 10;
let total_pubkeys_to_modify = 10;
let all_pubkeys: Vec<Pubkey> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(total_pubkeys)
.collect();
let program_id = system_program::id();
let starting_lamports = 1;
let starting_account = AccountSharedData::new(starting_lamports, 0, &program_id);
// Write accounts to the store
for key in &all_pubkeys {
bank0.store_account(key, &starting_account);
}
// Set aside a subset of accounts to modify
let pubkeys_to_modify: Arc<HashSet<Pubkey>> = Arc::new(
all_pubkeys
.into_iter()
.take(total_pubkeys_to_modify)
.collect(),
);
let exit = Arc::new(AtomicBool::new(false));
// Thread that runs scan and constantly checks for
// consistency
let pubkeys_to_modify_ = pubkeys_to_modify.clone();
// Channel over which the bank to scan is sent
let (bank_to_scan_sender, bank_to_scan_receiver): (
crossbeam_channel::Sender<Arc<Bank>>,
crossbeam_channel::Receiver<Arc<Bank>>,
) = bounded(1);
let (scan_finished_sender, scan_finished_receiver): (
crossbeam_channel::Sender<BankId>,
crossbeam_channel::Receiver<BankId>,
) = unbounded();
let num_banks_scanned = Arc::new(AtomicU64::new(0));
let scan_thread = {
let exit = exit.clone();
let num_banks_scanned = num_banks_scanned.clone();
Builder::new()
.name("scan".to_string())
.spawn(move || {
loop {
info!("starting scan iteration");
if exit.load(Relaxed) {
info!("scan exiting");
return;
}
if let Ok(bank_to_scan) =
bank_to_scan_receiver.recv_timeout(Duration::from_millis(10))
{
info!("scanning program accounts for slot {}", bank_to_scan.slot());
let accounts_result = bank_to_scan
.get_program_accounts(&program_id, &ScanConfig::default());
let _ = scan_finished_sender.send(bank_to_scan.bank_id());
num_banks_scanned.fetch_add(1, Relaxed);
match (&acceptable_scan_results, accounts_result.is_err()) {
(AcceptableScanResults::DroppedSlotError, _)
| (AcceptableScanResults::Both, true) => {
assert_eq!(
accounts_result,
Err(ScanError::SlotRemoved {
slot: bank_to_scan.slot(),
bank_id: bank_to_scan.bank_id()
})
);
}
(AcceptableScanResults::NoFailure, _)
| (AcceptableScanResults::Both, false) => {
assert!(accounts_result.is_ok())
}
}
// Should never see empty accounts because no slot ever deleted
// any of the original accounts, and the scan should reflect the
// account state at some frozen slot `X` (no partial updates).
if let Ok(accounts) = accounts_result {
assert!(!accounts.is_empty());
let mut expected_lamports = None;
let mut target_accounts_found = HashSet::new();
for (pubkey, account) in accounts {
let account_balance = account.lamports();
if pubkeys_to_modify_.contains(&pubkey) {
target_accounts_found.insert(pubkey);
if let Some(expected_lamports) = expected_lamports {
assert_eq!(account_balance, expected_lamports);
} else {
// All pubkeys in the specified set should have the same balance
expected_lamports = Some(account_balance);
}
}
}
// Should've found all the accounts, i.e. no partial cleans should
// be detected
assert_eq!(target_accounts_found.len(), total_pubkeys_to_modify);
}
}
}
})
.unwrap()
};
// Thread that constantly updates the accounts, sets
// roots, and cleans
let update_thread = Builder::new()
.name("update".to_string())
.spawn(move || {
update_f(
bank0,
bank_to_scan_sender,
scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports,
);
})
.unwrap();
// Let threads run for a while, check the scans didn't see any mixed slots
let min_expected_number_of_scans = 5;
std::thread::sleep(Duration::new(5, 0));
let mut remaining_loops = 1000;
loop {
if num_banks_scanned.load(Relaxed) > min_expected_number_of_scans {
break;
} else {
std::thread::sleep(Duration::from_millis(100));
}
remaining_loops -= 1;
if remaining_loops == 0 {
break; // just quit and try to get the thread result (panic, etc.)
}
}
exit.store(true, Relaxed);
scan_thread.join().unwrap();
update_thread.join().unwrap();
assert!(remaining_loops > 0, "test timed out");
}
#[test]
fn test_store_scan_consistency_unrooted() {
for accounts_db_caching_enabled in &[false, true] {
let (pruned_banks_sender, pruned_banks_receiver) = unbounded();
let abs_request_handler = AbsRequestHandler {
snapshot_request_handler: None,
pruned_banks_receiver,
};
test_store_scan_consistency(
*accounts_db_caching_enabled,
move |bank0,
bank_to_scan_sender,
_scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports| {
let mut current_major_fork_bank = bank0;
loop {
let mut current_minor_fork_bank = current_major_fork_bank.clone();
let num_new_banks = 2;
let lamports = current_minor_fork_bank.slot() + starting_lamports + 1;
// Modify banks on the two banks on the minor fork
for pubkeys_to_modify in &pubkeys_to_modify
.iter()
.chunks(pubkeys_to_modify.len() / num_new_banks)
{
current_minor_fork_bank = Arc::new(Bank::new_from_parent(
&current_minor_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() + 2,
));
let account = AccountSharedData::new(lamports, 0, &program_id);
// Write partial updates to each of the banks in the minor fork so that, if any of
// them get cleaned up, some keys will have the wrong account value or be missing.
for key in pubkeys_to_modify {
current_minor_fork_bank.store_account(key, &account);
}
current_minor_fork_bank.freeze();
}
// All the parent banks made in this iteration of the loop
// are currently discoverable, previous parents should have
// been squashed
assert_eq!(
current_minor_fork_bank.clone().parents_inclusive().len(),
num_new_banks + 1,
);
// `next_major_bank` needs to be sandwiched between the minor fork banks
// That way, after the squash(), the minor fork has the potential to see a
// *partial* clean of the banks < `next_major_bank`.
current_major_fork_bank = Arc::new(Bank::new_from_parent(
&current_major_fork_bank,
&solana_sdk::pubkey::new_rand(),
current_minor_fork_bank.slot() - 1,
));
let lamports = current_major_fork_bank.slot() + starting_lamports + 1;
let account = AccountSharedData::new(lamports, 0, &program_id);
for key in pubkeys_to_modify.iter() {
// Store rooted updates to these pubkeys such that the minor
// fork updates to the same keys will be deleted by clean
current_major_fork_bank.store_account(key, &account);
}
// Send the last new bank to the scan thread to perform the scan.
// Meanwhile this thread will continually set roots on a separate fork
// and squash/clean, purging the account entries from the minor forks
/*
bank 0
/ \
minor bank 1 \
/ current_major_fork_bank
minor bank 2
*/
// The capacity of the channel is 1 so that this thread will wait for the scan to finish before starting
// the next iteration, allowing the scan to stay in sync with these updates
// such that every scan will see this interruption.
if bank_to_scan_sender.send(current_minor_fork_bank).is_err() {
// Channel was disconnected, exit
return;
}
current_major_fork_bank.freeze();
current_major_fork_bank.squash();
// Try to get cache flush/clean to overlap with the scan
current_major_fork_bank.force_flush_accounts_cache();
current_major_fork_bank.clean_accounts(false, false, None);
// Move purge here so that Bank::drop()->purge_slots() doesn't race
// with clean. Simulates the call from AccountsBackgroundService
let is_abs_service = true;
abs_request_handler
.handle_pruned_banks(&current_major_fork_bank, is_abs_service);
}
},
Some(Box::new(SendDroppedBankCallback::new(
pruned_banks_sender.clone(),
))),
AcceptableScanResults::NoFailure,
)
}
}
#[test]
fn test_store_scan_consistency_root() {
for accounts_db_caching_enabled in &[false, true] {
test_store_scan_consistency(
*accounts_db_caching_enabled,
|bank0,
bank_to_scan_sender,
_scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports| {
let mut current_bank = bank0.clone();
let mut prev_bank = bank0;
loop {
let lamports_this_round = current_bank.slot() + starting_lamports + 1;
let account = AccountSharedData::new(lamports_this_round, 0, &program_id);
for key in pubkeys_to_modify.iter() {
current_bank.store_account(key, &account);
}
current_bank.freeze();
// Send the previous bank to the scan thread to perform the scan.
// Meanwhile this thread will squash and update roots immediately after
// so the roots will update while scanning.
//
// The capacity of the channel is 1 so that this thread will wait for the scan to finish before starting
// the next iteration, allowing the scan to stay in sync with these updates
// such that every scan will see this interruption.
if bank_to_scan_sender.send(prev_bank).is_err() {
// Channel was disconnected, exit
return;
}
current_bank.squash();
if current_bank.slot() % 2 == 0 {
current_bank.force_flush_accounts_cache();
current_bank.clean_accounts(true, false, None);
}
prev_bank = current_bank.clone();
current_bank = Arc::new(Bank::new_from_parent(
&current_bank,
&solana_sdk::pubkey::new_rand(),
current_bank.slot() + 1,
));
}
},
None,
AcceptableScanResults::NoFailure,
);
}
}
fn setup_banks_on_fork_to_remove(
bank0: Arc<Bank>,
pubkeys_to_modify: Arc<HashSet<Pubkey>>,
program_id: &Pubkey,
starting_lamports: u64,
num_banks_on_fork: usize,
step_size: usize,
) -> (Arc<Bank>, Vec<(Slot, BankId)>, Ancestors) {
// Need at least 2 keys to create inconsistency in account balances when deleting
// slots
assert!(pubkeys_to_modify.len() > 1);
// Tracks the bank at the tip of the to be created fork
let mut bank_at_fork_tip = bank0;
// All the slots on the fork except slot 0
let mut slots_on_fork = Vec::with_capacity(num_banks_on_fork);
// All accounts in each set of `step_size` slots will have the same account balances.
// The balances of the accounts change every `step_size` banks. Thus, if you
// delete any one of the latest `step_size` slots, then you will see varying account
// balances when loading the accounts.
assert!(num_banks_on_fork >= 2);
assert!(step_size >= 2);
let pubkeys_to_modify: Vec<Pubkey> = pubkeys_to_modify.iter().cloned().collect();
let pubkeys_to_modify_per_slot = (pubkeys_to_modify.len() / step_size).max(1);
for _ in (0..num_banks_on_fork).step_by(step_size) {
let mut lamports_this_round = 0;
for i in 0..step_size {
bank_at_fork_tip = Arc::new(Bank::new_from_parent(
&bank_at_fork_tip,
&solana_sdk::pubkey::new_rand(),
bank_at_fork_tip.slot() + 1,
));
if lamports_this_round == 0 {
lamports_this_round = bank_at_fork_tip.bank_id() + starting_lamports + 1;
}
let pubkey_to_modify_starting_index = i * pubkeys_to_modify_per_slot;
let account = AccountSharedData::new(lamports_this_round, 0, program_id);
for pubkey_index_to_modify in pubkey_to_modify_starting_index
..pubkey_to_modify_starting_index + pubkeys_to_modify_per_slot
{
let key = pubkeys_to_modify[pubkey_index_to_modify % pubkeys_to_modify.len()];
bank_at_fork_tip.store_account(&key, &account);
}
bank_at_fork_tip.freeze();
slots_on_fork.push((bank_at_fork_tip.slot(), bank_at_fork_tip.bank_id()));
}
}
let ancestors: Vec<(Slot, usize)> = slots_on_fork.iter().map(|(s, _)| (*s, 0)).collect();
let ancestors = Ancestors::from(ancestors);
(bank_at_fork_tip, slots_on_fork, ancestors)
}
#[test]
fn test_remove_unrooted_before_scan() {
for accounts_db_caching_enabled in &[false, true] {
test_store_scan_consistency(
*accounts_db_caching_enabled,
|bank0,
bank_to_scan_sender,
scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports| {
loop {
let (bank_at_fork_tip, slots_on_fork, ancestors) =
setup_banks_on_fork_to_remove(
bank0.clone(),
pubkeys_to_modify.clone(),
&program_id,
starting_lamports,
10,
2,
);
// Test removing the slot before the scan starts, should cause
// SlotRemoved error every time
for k in pubkeys_to_modify.iter() {
assert!(bank_at_fork_tip.load_slow(&ancestors, k).is_some());
}
bank_at_fork_tip.remove_unrooted_slots(&slots_on_fork);
// Accounts on this fork should not be found after removal
for k in pubkeys_to_modify.iter() {
assert!(bank_at_fork_tip.load_slow(&ancestors, k).is_none());
}
if bank_to_scan_sender.send(bank_at_fork_tip.clone()).is_err() {
return;
}
// Wait for scan to finish before starting next iteration
let finished_scan_bank_id = scan_finished_receiver.recv();
if finished_scan_bank_id.is_err() {
return;
}
assert_eq!(finished_scan_bank_id.unwrap(), bank_at_fork_tip.bank_id());
}
},
None,
// Test removing the slot before the scan starts, should error every time
AcceptableScanResults::DroppedSlotError,
);
}
}
#[test]
fn test_remove_unrooted_scan_then_recreate_same_slot_before_scan() {
for accounts_db_caching_enabled in &[false, true] {
test_store_scan_consistency(
*accounts_db_caching_enabled,
|bank0,
bank_to_scan_sender,
scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports| {
let mut prev_bank = bank0.clone();
loop {
let start = Instant::now();
let (bank_at_fork_tip, slots_on_fork, ancestors) =
setup_banks_on_fork_to_remove(
bank0.clone(),
pubkeys_to_modify.clone(),
&program_id,
starting_lamports,
10,
2,
);
info!("setting up banks elapsed: {}", start.elapsed().as_millis());
// Remove the fork. Then we'll recreate the slots and only after we've
// recreated the slots, do we send this old bank for scanning.
// Skip scanning bank 0 on first iteration of loop, since those accounts
// aren't being removed
if prev_bank.slot() != 0 {
info!(
"sending bank with slot: {:?}, elapsed: {}",
prev_bank.slot(),
start.elapsed().as_millis()
);
// Although we dumped the slots last iteration via `remove_unrooted_slots()`,
// we've recreated those slots this iteration, so they should be findable
// again
for k in pubkeys_to_modify.iter() {
assert!(bank_at_fork_tip.load_slow(&ancestors, k).is_some());
}
// Now after we've recreated the slots removed in the previous loop
// iteration, send the previous bank, should fail even though the
// same slots were recreated
if bank_to_scan_sender.send(prev_bank.clone()).is_err() {
return;
}
let finished_scan_bank_id = scan_finished_receiver.recv();
if finished_scan_bank_id.is_err() {
return;
}
// Wait for scan to finish before starting next iteration
assert_eq!(finished_scan_bank_id.unwrap(), prev_bank.bank_id());
}
bank_at_fork_tip.remove_unrooted_slots(&slots_on_fork);
prev_bank = bank_at_fork_tip;
}
},
None,
// Test removing the slot before the scan starts, should error every time
AcceptableScanResults::DroppedSlotError,
);
}
}
#[test]
fn test_remove_unrooted_scan_interleaved_with_remove_unrooted_slots() {
for accounts_db_caching_enabled in &[false, true] {
test_store_scan_consistency(
*accounts_db_caching_enabled,
|bank0,
bank_to_scan_sender,
scan_finished_receiver,
pubkeys_to_modify,
program_id,
starting_lamports| {
loop {
let step_size = 2;
let (bank_at_fork_tip, slots_on_fork, ancestors) =
setup_banks_on_fork_to_remove(
bank0.clone(),
pubkeys_to_modify.clone(),
&program_id,
starting_lamports,
10,
step_size,
);
// Although we dumped the slots last iteration via `remove_unrooted_slots()`,
// we've recreated those slots this iteration, so they should be findable
// again
for k in pubkeys_to_modify.iter() {
assert!(bank_at_fork_tip.load_slow(&ancestors, k).is_some());
}
                    // Send the new fork tip for scanning. The scan races with
                    // the slot removal below, so it may either succeed or
                    // observe a dropped slot.
if bank_to_scan_sender.send(bank_at_fork_tip.clone()).is_err() {
return;
}
// Remove 1 < `step_size` of the *latest* slots while the scan is happening.
// This should create inconsistency between the account balances of accounts
// stored in that slot, and the accounts stored in earlier slots
let slot_to_remove = *slots_on_fork.last().unwrap();
bank_at_fork_tip.remove_unrooted_slots(&[slot_to_remove]);
// Wait for scan to finish before starting next iteration
let finished_scan_bank_id = scan_finished_receiver.recv();
if finished_scan_bank_id.is_err() {
return;
}
assert_eq!(finished_scan_bank_id.unwrap(), bank_at_fork_tip.bank_id());
// Remove the rest of the slots before the next iteration
for (slot, bank_id) in slots_on_fork {
bank_at_fork_tip.remove_unrooted_slots(&[(slot, bank_id)]);
}
}
},
None,
                // Slots are removed while the scan is in flight, so the scan
                // may either succeed or fail with a dropped-slot error
AcceptableScanResults::Both,
);
}
}
#[test]
fn test_get_inflation_start_slot_devnet_testnet() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
genesis_config
.accounts
.remove(&feature_set::pico_inflation::id())
.unwrap();
genesis_config
.accounts
.remove(&feature_set::full_inflation::devnet_and_testnet::id())
.unwrap();
for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() {
genesis_config.accounts.remove(&pair.vote_id).unwrap();
genesis_config.accounts.remove(&pair.enable_id).unwrap();
}
let bank = Bank::new_for_tests(&genesis_config);
// Advance slot
let mut bank = new_from_parent(&Arc::new(bank));
bank = new_from_parent(&Arc::new(bank));
assert_eq!(bank.get_inflation_start_slot(), 0);
assert_eq!(bank.slot(), 2);
// Request `pico_inflation` activation
bank.store_account(
&feature_set::pico_inflation::id(),
&feature::create_account(
&Feature {
activated_at: Some(1),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 1);
// Advance slot
bank = new_from_parent(&Arc::new(bank));
assert_eq!(bank.slot(), 3);
// Request `full_inflation::devnet_and_testnet` activation,
// which takes priority over pico_inflation
bank.store_account(
&feature_set::full_inflation::devnet_and_testnet::id(),
&feature::create_account(
&Feature {
activated_at: Some(2),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 2);
// Request `full_inflation::mainnet::certusone` activation,
// which should have no effect on `get_inflation_start_slot`
bank.store_account(
&feature_set::full_inflation::mainnet::certusone::vote::id(),
&feature::create_account(
&Feature {
activated_at: Some(3),
},
42,
),
);
bank.store_account(
&feature_set::full_inflation::mainnet::certusone::enable::id(),
&feature::create_account(
&Feature {
activated_at: Some(3),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 2);
}
#[test]
fn test_get_inflation_start_slot_mainnet() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
genesis_config
.accounts
.remove(&feature_set::pico_inflation::id())
.unwrap();
genesis_config
.accounts
.remove(&feature_set::full_inflation::devnet_and_testnet::id())
.unwrap();
for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() {
genesis_config.accounts.remove(&pair.vote_id).unwrap();
genesis_config.accounts.remove(&pair.enable_id).unwrap();
}
let bank = Bank::new_for_tests(&genesis_config);
// Advance slot
let mut bank = new_from_parent(&Arc::new(bank));
bank = new_from_parent(&Arc::new(bank));
assert_eq!(bank.get_inflation_start_slot(), 0);
assert_eq!(bank.slot(), 2);
// Request `pico_inflation` activation
bank.store_account(
&feature_set::pico_inflation::id(),
&feature::create_account(
&Feature {
activated_at: Some(1),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 1);
// Advance slot
bank = new_from_parent(&Arc::new(bank));
assert_eq!(bank.slot(), 3);
// Request `full_inflation::mainnet::certusone` activation,
// which takes priority over pico_inflation
bank.store_account(
&feature_set::full_inflation::mainnet::certusone::vote::id(),
&feature::create_account(
&Feature {
activated_at: Some(2),
},
42,
),
);
bank.store_account(
&feature_set::full_inflation::mainnet::certusone::enable::id(),
&feature::create_account(
&Feature {
activated_at: Some(2),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 2);
// Advance slot
bank = new_from_parent(&Arc::new(bank));
assert_eq!(bank.slot(), 4);
// Request `full_inflation::devnet_and_testnet` activation,
// which should have no effect on `get_inflation_start_slot`
bank.store_account(
&feature_set::full_inflation::devnet_and_testnet::id(),
&feature::create_account(
&Feature {
activated_at: Some(bank.slot()),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_start_slot(), 2);
}
#[test]
fn test_get_inflation_num_slots_with_activations() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let slots_per_epoch = 32;
genesis_config.epoch_schedule = EpochSchedule::new(slots_per_epoch);
genesis_config
.accounts
.remove(&feature_set::pico_inflation::id())
.unwrap();
genesis_config
.accounts
.remove(&feature_set::full_inflation::devnet_and_testnet::id())
.unwrap();
for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() {
genesis_config.accounts.remove(&pair.vote_id).unwrap();
genesis_config.accounts.remove(&pair.enable_id).unwrap();
}
let mut bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.get_inflation_num_slots(), 0);
for _ in 0..2 * slots_per_epoch {
bank = new_from_parent(&Arc::new(bank));
}
assert_eq!(bank.get_inflation_num_slots(), 2 * slots_per_epoch);
// Activate pico_inflation
let pico_inflation_activation_slot = bank.slot();
bank.store_account(
&feature_set::pico_inflation::id(),
&feature::create_account(
&Feature {
activated_at: Some(pico_inflation_activation_slot),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_num_slots(), slots_per_epoch);
for _ in 0..slots_per_epoch {
bank = new_from_parent(&Arc::new(bank));
}
assert_eq!(bank.get_inflation_num_slots(), 2 * slots_per_epoch);
// Activate full_inflation::devnet_and_testnet
let full_inflation_activation_slot = bank.slot();
bank.store_account(
&feature_set::full_inflation::devnet_and_testnet::id(),
&feature::create_account(
&Feature {
activated_at: Some(full_inflation_activation_slot),
},
42,
),
);
bank.compute_active_feature_set(true);
assert_eq!(bank.get_inflation_num_slots(), slots_per_epoch);
for _ in 0..slots_per_epoch {
bank = new_from_parent(&Arc::new(bank));
}
assert_eq!(bank.get_inflation_num_slots(), 2 * slots_per_epoch);
}
#[test]
fn test_get_inflation_num_slots_already_activated() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let slots_per_epoch = 32;
genesis_config.epoch_schedule = EpochSchedule::new(slots_per_epoch);
let mut bank = Bank::new_for_tests(&genesis_config);
assert_eq!(bank.get_inflation_num_slots(), 0);
for _ in 0..slots_per_epoch {
bank = new_from_parent(&Arc::new(bank));
}
assert_eq!(bank.get_inflation_num_slots(), slots_per_epoch);
for _ in 0..slots_per_epoch {
bank = new_from_parent(&Arc::new(bank));
}
assert_eq!(bank.get_inflation_num_slots(), 2 * slots_per_epoch);
}
#[test]
fn test_stake_vote_account_validity() {
let validator_vote_keypairs0 = ValidatorVoteKeypairs::new_rand();
let validator_vote_keypairs1 = ValidatorVoteKeypairs::new_rand();
let validator_keypairs = vec![&validator_vote_keypairs0, &validator_vote_keypairs1];
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts(
1_000_000_000,
&validator_keypairs,
vec![10_000; 2],
);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let vote_and_stake_accounts = bank
.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer())
.vote_with_stake_delegations_map;
assert_eq!(vote_and_stake_accounts.len(), 2);
let mut vote_account = bank
.get_account(&validator_vote_keypairs0.vote_keypair.pubkey())
.unwrap_or_default();
let original_lamports = vote_account.lamports();
vote_account.set_lamports(0);
// Simulate vote account removal via full withdrawal
bank.store_account(
&validator_vote_keypairs0.vote_keypair.pubkey(),
&vote_account,
);
// Modify staked vote account owner; a vote account owned by another program could be
// freely modified with malicious data
let bogus_vote_program = Pubkey::new_unique();
vote_account.set_lamports(original_lamports);
vote_account.set_owner(bogus_vote_program);
bank.store_account(
&validator_vote_keypairs0.vote_keypair.pubkey(),
&vote_account,
);
assert_eq!(bank.vote_accounts().len(), 1);
// Modify stake account owner; a stake account owned by another program could be freely
// modified with malicious data
let bogus_stake_program = Pubkey::new_unique();
let mut stake_account = bank
.get_account(&validator_vote_keypairs1.stake_keypair.pubkey())
.unwrap_or_default();
stake_account.set_owner(bogus_stake_program);
bank.store_account(
&validator_vote_keypairs1.stake_keypair.pubkey(),
&stake_account,
);
// Accounts must be valid stake and vote accounts
let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let vote_and_stake_accounts = bank
.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer())
.vote_with_stake_delegations_map;
assert_eq!(vote_and_stake_accounts.len(), 0);
}
#[test]
fn test_vote_epoch_panic() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(
1_000_000_000_000_000,
&Pubkey::new_unique(),
bootstrap_validator_stake_lamports(),
);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
let vote_keypair = keypair_from_seed(&[1u8; 32]).unwrap();
let stake_keypair = keypair_from_seed(&[2u8; 32]).unwrap();
let mut setup_ixs = Vec::new();
setup_ixs.extend(
vote_instruction::create_account(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
&VoteInit {
node_pubkey: mint_keypair.pubkey(),
authorized_voter: vote_keypair.pubkey(),
authorized_withdrawer: mint_keypair.pubkey(),
commission: 0,
},
1_000_000_000,
)
.into_iter(),
);
setup_ixs.extend(
stake_instruction::create_account_and_delegate_stake(
&mint_keypair.pubkey(),
&stake_keypair.pubkey(),
&vote_keypair.pubkey(),
&Authorized::auto(&mint_keypair.pubkey()),
&Lockup::default(),
1_000_000_000_000,
)
.into_iter(),
);
setup_ixs.push(vote_instruction::withdraw(
&vote_keypair.pubkey(),
&mint_keypair.pubkey(),
1_000_000_000,
&mint_keypair.pubkey(),
));
setup_ixs.push(system_instruction::transfer(
&mint_keypair.pubkey(),
&vote_keypair.pubkey(),
1_000_000_000,
));
let result = bank.process_transaction(&Transaction::new(
&[&mint_keypair, &vote_keypair, &stake_keypair],
Message::new(&setup_ixs, Some(&mint_keypair.pubkey())),
bank.last_blockhash(),
));
assert!(result.is_ok());
let _bank = Bank::new_from_parent(
&bank,
&mint_keypair.pubkey(),
genesis_config.epoch_schedule.get_first_slot_in_epoch(1),
);
}
#[test]
fn test_tx_log_order() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(
1_000_000_000_000_000,
&Pubkey::new_unique(),
bootstrap_validator_stake_lamports(),
);
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
*bank.transaction_log_collector_config.write().unwrap() = TransactionLogCollectorConfig {
mentioned_addresses: HashSet::new(),
filter: TransactionLogCollectorFilter::All,
};
let blockhash = bank.last_blockhash();
let sender0 = Keypair::new();
let sender1 = Keypair::new();
bank.transfer(100, &mint_keypair, &sender0.pubkey())
.unwrap();
bank.transfer(100, &mint_keypair, &sender1.pubkey())
.unwrap();
let recipient0 = Pubkey::new_unique();
let recipient1 = Pubkey::new_unique();
let tx0 = system_transaction::transfer(&sender0, &recipient0, 10, blockhash);
let success_sig = tx0.signatures[0];
let tx1 = system_transaction::transfer(&sender1, &recipient1, 110, blockhash); // Should produce insufficient funds log
let failure_sig = tx1.signatures[0];
let tx2 = system_transaction::transfer(&sender0, &recipient0, 1, blockhash);
let txs = vec![tx0, tx1, tx2];
let batch = bank.prepare_batch_for_tests(txs);
let log_results = bank
.load_execute_and_commit_transactions(
&batch,
MAX_PROCESSING_AGE,
false,
false,
true,
&mut ExecuteTimings::default(),
)
.3;
assert_eq!(log_results.len(), 3);
assert!(log_results[0].as_ref().unwrap()[1].contains(&"success".to_string()));
assert!(log_results[1].as_ref().unwrap()[2].contains(&"failed".to_string()));
assert!(log_results[2].as_ref().is_none());
let stored_logs = &bank.transaction_log_collector.read().unwrap().logs;
let success_log_info = stored_logs
.iter()
.find(|transaction_log_info| transaction_log_info.signature == success_sig)
.unwrap();
assert!(success_log_info.result.is_ok());
let success_log = success_log_info.log_messages.clone().pop().unwrap();
assert!(success_log.contains(&"success".to_string()));
let failure_log_info = stored_logs
.iter()
.find(|transaction_log_info| transaction_log_info.signature == failure_sig)
.unwrap();
assert!(failure_log_info.result.is_err());
let failure_log = failure_log_info.log_messages.clone().pop().unwrap();
assert!(failure_log.contains(&"failed".to_string()));
}
#[test]
fn test_get_largest_accounts() {
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let bank = Bank::new_for_tests(&genesis_config);
let pubkeys: Vec<_> = (0..5).map(|_| Pubkey::new_unique()).collect();
let pubkeys_hashset: HashSet<_> = pubkeys.iter().cloned().collect();
let pubkeys_balances: Vec<_> = pubkeys
.iter()
.cloned()
.zip(vec![
sol_to_lamports(2.0),
sol_to_lamports(3.0),
sol_to_lamports(3.0),
sol_to_lamports(4.0),
sol_to_lamports(5.0),
])
.collect();
// Initialize accounts; all have larger SOL balances than current Bank built-ins
let account0 = AccountSharedData::new(pubkeys_balances[0].1, 0, &Pubkey::default());
bank.store_account(&pubkeys_balances[0].0, &account0);
let account1 = AccountSharedData::new(pubkeys_balances[1].1, 0, &Pubkey::default());
bank.store_account(&pubkeys_balances[1].0, &account1);
let account2 = AccountSharedData::new(pubkeys_balances[2].1, 0, &Pubkey::default());
bank.store_account(&pubkeys_balances[2].0, &account2);
let account3 = AccountSharedData::new(pubkeys_balances[3].1, 0, &Pubkey::default());
bank.store_account(&pubkeys_balances[3].0, &account3);
let account4 = AccountSharedData::new(pubkeys_balances[4].1, 0, &Pubkey::default());
bank.store_account(&pubkeys_balances[4].0, &account4);
// Create HashSet to exclude an account
let exclude4: HashSet<_> = pubkeys[4..].iter().cloned().collect();
let mut sorted_accounts = pubkeys_balances.clone();
sorted_accounts.sort_by(|a, b| a.1.cmp(&b.1).reverse());
// Return only one largest account
assert_eq!(
bank.get_largest_accounts(1, &pubkeys_hashset, AccountAddressFilter::Include)
.unwrap(),
vec![(pubkeys[4], sol_to_lamports(5.0))]
);
assert_eq!(
bank.get_largest_accounts(1, &HashSet::new(), AccountAddressFilter::Exclude)
.unwrap(),
vec![(pubkeys[4], sol_to_lamports(5.0))]
);
assert_eq!(
bank.get_largest_accounts(1, &exclude4, AccountAddressFilter::Exclude)
.unwrap(),
vec![(pubkeys[3], sol_to_lamports(4.0))]
);
// Return all added accounts
let results = bank
.get_largest_accounts(10, &pubkeys_hashset, AccountAddressFilter::Include)
.unwrap();
assert_eq!(results.len(), sorted_accounts.len());
for pubkey_balance in sorted_accounts.iter() {
assert!(results.contains(pubkey_balance));
}
let mut sorted_results = results.clone();
sorted_results.sort_by(|a, b| a.1.cmp(&b.1).reverse());
assert_eq!(sorted_results, results);
let expected_accounts = sorted_accounts[1..].to_vec();
let results = bank
.get_largest_accounts(10, &exclude4, AccountAddressFilter::Exclude)
.unwrap();
// results include 5 Bank builtins
assert_eq!(results.len(), 10);
for pubkey_balance in expected_accounts.iter() {
assert!(results.contains(pubkey_balance));
}
let mut sorted_results = results.clone();
sorted_results.sort_by(|a, b| a.1.cmp(&b.1).reverse());
assert_eq!(sorted_results, results);
        // Return subsets of the added accounts: the top 4, then the top 3 with the largest excluded
let expected_accounts = sorted_accounts[0..4].to_vec();
let results = bank
.get_largest_accounts(4, &pubkeys_hashset, AccountAddressFilter::Include)
.unwrap();
assert_eq!(results.len(), expected_accounts.len());
for pubkey_balance in expected_accounts.iter() {
assert!(results.contains(pubkey_balance));
}
let expected_accounts = expected_accounts[1..4].to_vec();
let results = bank
.get_largest_accounts(3, &exclude4, AccountAddressFilter::Exclude)
.unwrap();
assert_eq!(results.len(), expected_accounts.len());
for pubkey_balance in expected_accounts.iter() {
assert!(results.contains(pubkey_balance));
}
// Exclude more, and non-sequential, accounts
let exclude: HashSet<_> = vec![pubkeys[0], pubkeys[2], pubkeys[4]]
.iter()
.cloned()
.collect();
assert_eq!(
bank.get_largest_accounts(2, &exclude, AccountAddressFilter::Exclude)
.unwrap(),
vec![pubkeys_balances[3], pubkeys_balances[1]]
);
}
#[test]
fn test_transfer_sysvar() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(
1_000_000_000_000_000,
&Pubkey::new_unique(),
bootstrap_validator_stake_lamports(),
);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_ix_processor(
first_instruction_account: usize,
_data: &[u8],
invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
use solana_sdk::account::WritableAccount;
let keyed_accounts = invoke_context.get_keyed_accounts()?;
let data = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?;
data.try_account_ref_mut()?.data_as_mut_slice()[0] = 5;
Ok(())
}
let program_id = solana_sdk::pubkey::new_rand();
bank.add_builtin("mock_program1", &program_id, mock_ix_processor);
let blockhash = bank.last_blockhash();
#[allow(deprecated)]
        let clock_sysvar = sysvar::clock::id();
#[allow(deprecated)]
let orig_lamports = bank.get_account(&sysvar::clock::id()).unwrap().lamports();
info!("{:?}", bank.get_account(&sysvar::clock::id()));
        let tx = system_transaction::transfer(&mint_keypair, &clock_sysvar, 10, blockhash);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InstructionError(
0,
InstructionError::ReadonlyLamportChange
))
);
assert_eq!(
bank.get_account(&sysvar::clock::id()).unwrap().lamports(),
orig_lamports
);
info!("{:?}", bank.get_account(&sysvar::clock::id()));
let accounts = vec![
AccountMeta::new(mint_keypair.pubkey(), true),
            AccountMeta::new(clock_sysvar, false),
];
let ix = Instruction::new_with_bincode(program_id, &0, accounts);
let message = Message::new(&[ix], Some(&mint_keypair.pubkey()));
let tx = Transaction::new(&[&mint_keypair], message, blockhash);
assert_eq!(
bank.process_transaction(&tx),
Err(TransactionError::InstructionError(
0,
InstructionError::ReadonlyDataModified
))
);
}
#[test]
fn test_clean_dropped_unrooted_frozen_banks() {
solana_logger::setup();
do_test_clean_dropped_unrooted_banks(FreezeBank1::Yes);
}
#[test]
fn test_clean_dropped_unrooted_unfrozen_banks() {
solana_logger::setup();
do_test_clean_dropped_unrooted_banks(FreezeBank1::No);
}
/// A simple enum to toggle freezing Bank1 or not. Used in the clean_dropped_unrooted tests.
enum FreezeBank1 {
No,
Yes,
}
fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) {
//! Test that dropped unrooted banks are cleaned up properly
//!
//! slot 0: bank0 (rooted)
//! / \
//! slot 1: / bank1 (unrooted and dropped)
//! /
//! slot 2: bank2 (rooted)
//!
//! In the scenario above, when `clean_accounts()` is called on bank2, the keys that exist
//! _only_ in bank1 should be cleaned up, since those keys are unreachable.
//!
//! The following scenarios are tested:
//!
//! 1. A key is written _only_ in an unrooted bank (key1)
//! - In this case, key1 should be cleaned up
//! 2. A key is written in both an unrooted _and_ rooted bank (key3)
//! - In this case, key3's ref-count should be decremented correctly
//! 3. A key with zero lamports is _only_ in an unrooted bank (key4)
//! - In this case, key4 should be cleaned up
//! 4. A key with zero lamports is in both an unrooted _and_ rooted bank (key5)
//! - In this case, key5's ref-count should be decremented correctly
let (genesis_config, mint_keypair) = create_genesis_config(100);
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
let collector = Pubkey::new_unique();
let owner = Pubkey::new_unique();
let key1 = Keypair::new(); // only touched in bank1
let key2 = Keypair::new(); // only touched in bank2
let key3 = Keypair::new(); // touched in both bank1 and bank2
let key4 = Keypair::new(); // in only bank1, and has zero lamports
let key5 = Keypair::new(); // in both bank1 and bank2, and has zero lamports
bank0.transfer(2, &mint_keypair, &key2.pubkey()).unwrap();
bank0.freeze();
let slot = 1;
let bank1 = Bank::new_from_parent(&bank0, &collector, slot);
bank1.transfer(3, &mint_keypair, &key1.pubkey()).unwrap();
bank1.store_account(&key4.pubkey(), &AccountSharedData::new(0, 0, &owner));
bank1.store_account(&key5.pubkey(), &AccountSharedData::new(0, 0, &owner));
if let FreezeBank1::Yes = freeze_bank1 {
bank1.freeze();
}
let slot = slot + 1;
let bank2 = Bank::new_from_parent(&bank0, &collector, slot);
bank2.transfer(4, &mint_keypair, &key2.pubkey()).unwrap();
bank2.transfer(6, &mint_keypair, &key3.pubkey()).unwrap();
bank2.store_account(&key5.pubkey(), &AccountSharedData::new(0, 0, &owner));
bank2.freeze(); // the freeze here is not strictly necessary, but more for illustration
bank2.squash();
drop(bank1);
bank2.clean_accounts(false, false, None);
let expected_ref_count_for_cleaned_up_keys = 0;
let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1;
assert_eq!(
bank2
.rc
.accounts
.accounts_db
.accounts_index
.ref_count_from_storage(&key1.pubkey()),
expected_ref_count_for_cleaned_up_keys
);
assert_ne!(
bank2
.rc
.accounts
.accounts_db
.accounts_index
.ref_count_from_storage(&key3.pubkey()),
expected_ref_count_for_cleaned_up_keys
);
assert_eq!(
bank2
.rc
.accounts
.accounts_db
.accounts_index
.ref_count_from_storage(&key4.pubkey()),
expected_ref_count_for_cleaned_up_keys
);
assert_eq!(
bank2
.rc
.accounts
.accounts_db
.accounts_index
.ref_count_from_storage(&key5.pubkey()),
expected_ref_count_for_keys_in_both_slot1_and_slot2,
);
assert_eq!(
bank2.rc.accounts.accounts_db.alive_account_count_in_slot(1),
0
);
}
#[test]
fn test_rent_debits() {
let mut rent_debits = RentDebits::default();
// No entry for 0 rewards
rent_debits.insert(&Pubkey::new_unique(), 0, 0);
assert_eq!(rent_debits.0.len(), 0);
// Some that actually work
rent_debits.insert(&Pubkey::new_unique(), 1, 0);
assert_eq!(rent_debits.0.len(), 1);
rent_debits.insert(&Pubkey::new_unique(), i64::MAX as u64, 0);
assert_eq!(rent_debits.0.len(), 2);
}
#[test]
fn test_compute_budget_program_noop() {
solana_logger::setup();
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(
1_000_000_000_000_000,
&Pubkey::new_unique(),
bootstrap_validator_stake_lamports(),
);
// activate all features except..
activate_all_features(&mut genesis_config);
genesis_config
.accounts
.remove(&feature_set::tx_wide_compute_cap::id());
genesis_config
.accounts
.remove(&feature_set::requestable_heap_size::id());
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_ix_processor(
_first_instruction_account: usize,
_data: &[u8],
invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
let compute_budget = invoke_context.get_compute_budget();
assert_eq!(
*compute_budget,
ComputeBudget {
max_units: 200_000,
heap_size: None,
..ComputeBudget::default()
}
);
Ok(())
}
let program_id = solana_sdk::pubkey::new_rand();
bank.add_builtin("mock_program", &program_id, mock_ix_processor);
let message = Message::new(
&[
ComputeBudgetInstruction::request_units(1),
ComputeBudgetInstruction::request_heap_frame(48 * 1024),
Instruction::new_with_bincode(program_id, &0, vec![]),
],
Some(&mint_keypair.pubkey()),
);
let tx = Transaction::new(&[&mint_keypair], message, bank.last_blockhash());
bank.process_transaction(&tx).unwrap();
}
#[test]
fn test_compute_request_instruction() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(
1_000_000_000_000_000,
&Pubkey::new_unique(),
bootstrap_validator_stake_lamports(),
);
let mut bank = Bank::new_for_tests(&genesis_config);
fn mock_ix_processor(
_first_instruction_account: usize,
_data: &[u8],
invoke_context: &mut InvokeContext,
) -> std::result::Result<(), InstructionError> {
let compute_budget = invoke_context.get_compute_budget();
assert_eq!(
*compute_budget,
ComputeBudget {
max_units: 1,
heap_size: Some(48 * 1024),
..ComputeBudget::default()
}
);
Ok(())
}
let program_id = solana_sdk::pubkey::new_rand();
bank.add_builtin("mock_program", &program_id, mock_ix_processor);
let message = Message::new(
&[
ComputeBudgetInstruction::request_units(1),
ComputeBudgetInstruction::request_heap_frame(48 * 1024),
Instruction::new_with_bincode(program_id, &0, vec![]),
],
Some(&mint_keypair.pubkey()),
);
let tx = Transaction::new(&[&mint_keypair], message, bank.last_blockhash());
bank.process_transaction(&tx).unwrap();
}
#[test]
fn test_verify_and_hash_transaction_sig_len() {
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
// activate all features but verify_tx_signatures_len
activate_all_features(&mut genesis_config);
genesis_config
.accounts
.remove(&feature_set::verify_tx_signatures_len::id());
let bank = Bank::new_for_tests(&genesis_config);
let mut rng = rand::thread_rng();
let recent_blockhash = hash::new_rand(&mut rng);
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let to_pubkey = to_keypair.pubkey();
enum TestCase {
AddSignature,
RemoveSignature,
}
let make_transaction = |case: TestCase| {
let message = Message::new(
&[system_instruction::transfer(&from_pubkey, &to_pubkey, 1)],
Some(&from_pubkey),
);
let mut tx = Transaction::new(&[&from_keypair], message, recent_blockhash);
assert_eq!(tx.message.header.num_required_signatures, 1);
match case {
TestCase::AddSignature => {
let signature = to_keypair.sign_message(&tx.message.serialize());
tx.signatures.push(signature);
}
TestCase::RemoveSignature => {
tx.signatures.remove(0);
}
}
tx
};
// Too few signatures: Sanitization failure
{
let tx = make_transaction(TestCase::RemoveSignature);
assert_eq!(
bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.err(),
Some(TransactionError::SanitizeFailure),
);
}
// Too many signatures: Sanitization failure
{
let tx = make_transaction(TestCase::AddSignature);
assert_eq!(
bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.err(),
Some(TransactionError::SanitizeFailure),
);
}
}
#[test]
fn test_verify_transactions_load_duplicate_account() {
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let bank = Bank::new_for_tests(&genesis_config);
let mut rng = rand::thread_rng();
let recent_blockhash = hash::new_rand(&mut rng);
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
let from_pubkey = from_keypair.pubkey();
let to_pubkey = to_keypair.pubkey();
let make_transaction = || {
let mut message = Message::new(
&[system_instruction::transfer(&from_pubkey, &to_pubkey, 1)],
Some(&from_pubkey),
);
let to_index = message
.account_keys
.iter()
.position(|k| k == &to_pubkey)
.unwrap();
message.account_keys[to_index] = from_pubkey;
Transaction::new(&[&from_keypair], message, recent_blockhash)
};
// Duplicate account
{
let tx = make_transaction();
assert_eq!(
bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.err(),
Some(TransactionError::AccountLoadedTwice),
);
}
}
#[test]
fn test_verify_transactions_packet_data_size() {
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42);
let bank = Bank::new_for_tests(&genesis_config);
let mut rng = rand::thread_rng();
let recent_blockhash = hash::new_rand(&mut rng);
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let make_transaction = |size| {
let ixs: Vec<_> = std::iter::repeat_with(|| {
system_instruction::transfer(&pubkey, &Pubkey::new_unique(), 1)
})
.take(size)
.collect();
let message = Message::new(&ixs[..], Some(&pubkey));
Transaction::new(&[&keypair], message, recent_blockhash)
};
// Small transaction.
{
let tx = make_transaction(5);
assert!(bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64);
assert!(bank
.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.is_ok(),);
}
// Big transaction.
{
let tx = make_transaction(25);
assert!(bincode::serialized_size(&tx).unwrap() > PACKET_DATA_SIZE as u64);
assert_eq!(
bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.err(),
Some(TransactionError::SanitizeFailure),
);
}
// Assert that verify fails as soon as serialized
// size exceeds packet data size.
for size in 1..30 {
let tx = make_transaction(size);
assert_eq!(
bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64,
bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification)
.is_ok(),
);
}
}
#[test]
    fn test_call_precompiled_program() {
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
} = create_genesis_config_with_leader(42, &Pubkey::new_unique(), 42);
activate_all_features(&mut genesis_config);
let bank = Bank::new_for_tests(&genesis_config);
// libsecp256k1
let secp_privkey = libsecp256k1::SecretKey::random(&mut rand::thread_rng());
let message_arr = b"hello";
let instruction = solana_sdk::secp256k1_instruction::new_secp256k1_instruction(
&secp_privkey,
message_arr,
);
let tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
// calling the program should be successful when called from the bank
// even if the program itself is not called
bank.process_transaction(&tx).unwrap();
// ed25519
let privkey = ed25519_dalek::Keypair::generate(&mut rand::thread_rng());
let message_arr = b"hello";
let instruction =
solana_sdk::ed25519_instruction::new_ed25519_instruction(&privkey, message_arr);
let tx = Transaction::new_signed_with_payer(
&[instruction],
Some(&mint_keypair.pubkey()),
&[&mint_keypair],
bank.last_blockhash(),
);
// calling the program should be successful when called from the bank
// even if the program itself is not called
bank.process_transaction(&tx).unwrap();
}
#[test]
fn test_calculate_fee() {
// Default: no fee.
let message =
SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap();
assert_eq!(Bank::calculate_fee(&message, 0), 0);
// One signature, a fee.
assert_eq!(Bank::calculate_fee(&message, 1), 1);
// Two signatures, double the fee.
let key0 = Pubkey::new_unique();
let key1 = Pubkey::new_unique();
let ix0 = system_instruction::transfer(&key0, &key1, 1);
let ix1 = system_instruction::transfer(&key1, &key0, 1);
let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&key0))).unwrap();
assert_eq!(Bank::calculate_fee(&message, 2), 4);
}
#[test]
fn test_calculate_fee_secp256k1() {
let key0 = Pubkey::new_unique();
let key1 = Pubkey::new_unique();
let ix0 = system_instruction::transfer(&key0, &key1, 1);
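        // Fee accounting here prices one lamport per signature (the second
        // argument to `calculate_fee` below); a secp256k1 instruction
        // contributes the signature count encoded in the first byte of its
        // instruction data, and empty data counts as zero signatures.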
let mut secp_instruction1 = Instruction {
program_id: secp256k1_program::id(),
accounts: vec![],
data: vec![],
};
let mut secp_instruction2 = Instruction {
program_id: secp256k1_program::id(),
accounts: vec![],
data: vec![1],
};
let message = SanitizedMessage::try_from(Message::new(
&[
ix0.clone(),
secp_instruction1.clone(),
secp_instruction2.clone(),
],
Some(&key0),
))
.unwrap();
assert_eq!(Bank::calculate_fee(&message, 1), 2);
secp_instruction1.data = vec![0];
secp_instruction2.data = vec![10];
let message = SanitizedMessage::try_from(Message::new(
&[ix0, secp_instruction1, secp_instruction2],
Some(&key0),
))
.unwrap();
assert_eq!(Bank::calculate_fee(&message, 1), 11);
}
#[test]
fn test_an_empty_instruction_without_program() {
let (genesis_config, mint_keypair) = create_genesis_config(1);
let destination = solana_sdk::pubkey::new_rand();
let mut ix = system_instruction::transfer(&mint_keypair.pubkey(), &destination, 0);
ix.program_id = native_loader::id(); // Empty executable account chain
let message = Message::new(&[ix], Some(&mint_keypair.pubkey()));
let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash());
let bank = Bank::new_for_tests(&genesis_config);
bank.process_transaction(&tx).unwrap();
let mut bank = Bank::new_for_tests(&genesis_config);
bank.activate_feature(&reject_empty_instruction_without_program::id());
assert_eq!(
bank.process_transaction(&tx).unwrap_err(),
TransactionError::InstructionError(0, InstructionError::UnsupportedProgramId),
);
}
#[test]
fn test_transaction_log_collector_get_logs_for_address() {
let address = Pubkey::new_unique();
let mut mentioned_address_map = HashMap::new();
mentioned_address_map.insert(address, vec![0]);
let transaction_log_collector = TransactionLogCollector {
mentioned_address_map,
..TransactionLogCollector::default()
};
assert_eq!(
transaction_log_collector.get_logs_for_address(Some(&address)),
Some(Vec::<TransactionLogInfo>::new()),
);
}
}
| 38.740614 | 132 | 0.578183 |
bb848ee0c4b271beaa0ba05a0c4cc2d6563343ab | 6,054 | use futures::{future, stream::SplitSink};
use futures_util::sink::SinkExt;
use std::sync::Arc;
use tokio::{
self,
net::TcpStream,
sync::{
mpsc::{self, UnboundedReceiver, UnboundedSender},
Mutex,
},
};
use tokio_util::codec::Framed;
use crate::communication::{
CommunicationError, ControlMessage, ControlMessageCodec, ControlMessageHandler,
InterProcessMessage, MessageCodec,
};
use crate::node::NodeId;
use crate::scheduler::endpoints_manager::ChannelsToSenders;
#[allow(dead_code)]
/// Listens on a `tokio::sync::mpsc` channel, and sends received messages on the network.
pub(crate) struct DataSender {
/// The id of the node the sink is sending data to.
node_id: NodeId,
/// Framed TCP write sink.
sink: SplitSink<Framed<TcpStream, MessageCodec>, InterProcessMessage>,
/// Tokio channel receiver on which to receive data from worker threads.
rx: UnboundedReceiver<InterProcessMessage>,
/// Tokio channel sender to `ControlMessageHandler`.
control_tx: UnboundedSender<ControlMessage>,
/// Tokio channel receiver from `ControlMessageHandler`.
control_rx: UnboundedReceiver<ControlMessage>,
}
impl DataSender {
pub(crate) async fn new(
node_id: NodeId,
sink: SplitSink<Framed<TcpStream, MessageCodec>, InterProcessMessage>,
channels_to_senders: Arc<Mutex<ChannelsToSenders>>,
control_handler: &mut ControlMessageHandler,
) -> Self {
// Create a channel for this stream.
let (tx, rx) = mpsc::unbounded_channel();
// Add entry in the shared state map.
channels_to_senders.lock().await.add_sender(node_id, tx);
// Set up control channel.
let (control_tx, control_rx) = mpsc::unbounded_channel();
control_handler.add_channel_to_data_sender(node_id, control_tx);
Self {
node_id,
sink,
rx,
control_tx: control_handler.get_channel_to_handler(),
control_rx,
}
}
pub(crate) async fn run(&mut self) -> Result<(), CommunicationError> {
// Notify `ControlMessageHandler` that sender is initialized.
self.control_tx
.send(ControlMessage::DataSenderInitialized(self.node_id))
.map_err(CommunicationError::from)?;
// TODO: listen on control_rx
loop {
match self.rx.recv().await {
Some(msg) => {
if let Err(e) = self.sink.send(msg).await.map_err(CommunicationError::from) {
return Err(e);
}
}
None => return Err(CommunicationError::Disconnected),
}
}
}
}
/// Sends messages received from operator executors to other nodes.
/// The function launches a task for each TCP sink. Each task listens
/// on an mpsc channel for new `InterProcessMessage`s, which it
/// forwards on the TCP stream.
pub(crate) async fn run_senders(senders: Vec<DataSender>) -> Result<(), CommunicationError> {
// Waits until all futures complete. This code will only be reached
// when all the mpsc channels are closed.
future::join_all(
senders
.into_iter()
.map(|mut sender| tokio::spawn(async move { sender.run().await })),
)
.await;
Ok(())
}
#[allow(dead_code)]
/// Listens for control messages on a `tokio::sync::mpsc` channel, and sends received messages on the network.
pub(crate) struct ControlSender {
/// The id of the node the sink is sending data to.
node_id: NodeId,
/// Framed TCP write sink.
sink: SplitSink<Framed<TcpStream, ControlMessageCodec>, ControlMessage>,
/// Tokio channel receiver on which to receive data from worker threads.
rx: UnboundedReceiver<ControlMessage>,
/// Tokio channel sender to `ControlMessageHandler`.
control_tx: UnboundedSender<ControlMessage>,
/// Channel receiver for control messages intended for this `ControlSender`.
control_rx: UnboundedReceiver<ControlMessage>,
}
impl ControlSender {
pub(crate) fn new(
node_id: NodeId,
sink: SplitSink<Framed<TcpStream, ControlMessageCodec>, ControlMessage>,
control_handler: &mut ControlMessageHandler,
) -> Self {
// Set up channel to other node.
let (tx, rx) = mpsc::unbounded_channel();
control_handler.add_channel_to_node(node_id, tx);
// Set up control channel.
let (control_tx, control_rx) = mpsc::unbounded_channel();
control_handler.add_channel_to_control_sender(node_id, control_tx);
Self {
node_id,
sink,
rx,
control_tx: control_handler.get_channel_to_handler(),
control_rx,
}
}
pub(crate) async fn run(&mut self) -> Result<(), CommunicationError> {
// Notify `ControlMessageHandler` that sender is initialized.
self.control_tx
.send(ControlMessage::ControlSenderInitialized(self.node_id))
.map_err(CommunicationError::from)?;
// TODO: listen on control_rx
loop {
match self.rx.recv().await {
Some(msg) => {
if let Err(e) = self.sink.send(msg).await.map_err(CommunicationError::from) {
return Err(e);
}
}
None => {
return Err(CommunicationError::Disconnected);
}
}
}
}
}
/// Sends messages received from the control handler to other nodes.
/// The function launches a task for each TCP sink. Each task listens
/// on a mpsc channel for new `ControlMessage`s, which it
/// forwards on the TCP stream.
pub(crate) async fn run_control_senders(
mut senders: Vec<ControlSender>,
) -> Result<(), CommunicationError> {
// Waits until all futures complete. This code will only be reached
// when all the mpsc channels are closed.
future::join_all(senders.iter_mut().map(|sender| sender.run())).await;
Ok(())
}
| 37.141104 | 110 | 0.636934 |
f8641722aee6b9a2afbcfff8b2a191e5979dc26f | 2,339 | use std::path::Path;
use clap::{App, Arg, ArgMatches};
use symbolic::common::{ByteView, DSymPathExt};
use symbolic::debuginfo::sourcebundle::SourceBundleWriter;
use symbolic::debuginfo::Archive;
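/// Prints an error followed by its chain of `source()` causes, one per line.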
fn print_error(mut error: &dyn std::error::Error) {
println!("Error: {}", error);
while let Some(source) = error.source() {
println!(" caused by {}", source);
error = source;
}
}
fn write_object_sources(path: &Path, output_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
println!("Inspecting {}", path.display());
let dsym_path = path.resolve_dsym();
let buffer = ByteView::open(dsym_path.as_deref().unwrap_or(path))?;
let archive = Archive::parse(&buffer)?;
println!("File format: {}", archive.file_format());
for object in archive.objects() {
match object {
Ok(object) => {
let out = output_path.join(&format!("{}.zip", &object.debug_id()));
println!(" -> {}", out.display());
let writer = SourceBundleWriter::create(&out)?;
writer.write_object(&object, &path.file_name().unwrap().to_string_lossy())?;
}
Err(e) => {
print!(" - ");
print_error(&e);
continue;
}
}
}
Ok(())
}
fn execute(matches: &ArgMatches<'_>) {
let output_path = Path::new(matches.value_of("output").unwrap());
for path in matches.values_of("paths").unwrap_or_default() {
if let Err(e) = write_object_sources(Path::new(&path), output_path) {
print_error(e.as_ref());
}
println!();
}
}
fn main() {
let matches = App::new("object-debug")
.about("Shows some information on object files")
.arg(
Arg::with_name("paths")
.required(true)
.multiple(true)
.value_name("PATH")
.help("Path to the debug file")
.number_of_values(1)
.index(1),
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.required(true)
.value_name("PATH")
.help("Path to the source output folder"),
)
.get_matches();
execute(&matches);
}
| 28.876543 | 100 | 0.524156 |
895577925890e3ed1c6d40a45fd30feee90f24de | 3,204 | //! Tests for the join code.
use ThreadPoolBuilder;
use join::*;
use rand::{Rng, SeedableRng, XorShiftRng};
use rand::distributions::Standard;
use unwind;
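/// Recursive parallel quicksort: partitions the slice, then sorts the two
/// halves concurrently via `join`.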
fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
if v.len() <= 1 {
return;
}
let mid = partition(v);
let (lo, hi) = v.split_at_mut(mid);
join(|| quick_sort(lo), || quick_sort(hi));
}
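/// Lomuto partition around the last element; returns the pivot's final index.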
fn partition<T: PartialOrd + Send>(v: &mut [T]) -> usize {
let pivot = v.len() - 1;
let mut i = 0;
for j in 0..pivot {
if v[j] <= v[pivot] {
v.swap(i, j);
i += 1;
}
}
v.swap(i, pivot);
i
}
fn seeded_rng() -> XorShiftRng {
let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
(0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
XorShiftRng::from_seed(seed)
}
#[test]
fn sort() {
let mut rng = seeded_rng();
let mut data: Vec<u32> = rng.sample_iter(&Standard).take(6 * 1024).collect();
let mut sorted_data = data.clone();
sorted_data.sort();
quick_sort(&mut data);
assert_eq!(data, sorted_data);
}
#[test]
fn sort_in_pool() {
let mut rng = seeded_rng();
let mut data: Vec<u32> = rng.sample_iter(&Standard).take(12 * 1024).collect();
let pool = ThreadPoolBuilder::new().build().unwrap();
let mut sorted_data = data.clone();
sorted_data.sort();
pool.install(|| quick_sort(&mut data));
assert_eq!(data, sorted_data);
}
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_a() {
join(|| panic!("Hello, world!"), || ());
}
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_b() {
join(|| (), || panic!("Hello, world!"));
}
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_both() {
join(|| panic!("Hello, world!"), || panic!("Goodbye, world!"));
}
#[test]
fn panic_b_still_executes() {
let mut x = false;
match unwind::halt_unwinding(|| join(|| panic!("Hello, world!"), || x = true)) {
        Ok(_) => panic!("failed to propagate panic from closure A"),
        Err(_) => assert!(x, "closure B failed to execute"),
}
}
#[test]
fn join_context_both() {
// If we're not in a pool, both should be marked stolen as they're injected.
let (a_migrated, b_migrated) = join_context(|a| a.migrated(), |b| b.migrated());
assert!(a_migrated);
assert!(b_migrated);
}
#[test]
fn join_context_neither() {
// If we're already in a 1-thread pool, neither job should be stolen.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
let (a_migrated, b_migrated) = pool.install(|| {
join_context(|a| a.migrated(), |b| b.migrated())
});
assert!(!a_migrated);
assert!(!b_migrated);
}
#[test]
fn join_context_second() {
use std::sync::Barrier;
// If we're already in a 2-thread pool, the second job should be stolen.
let barrier = Barrier::new(2);
let pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
let (a_migrated, b_migrated) = pool.install(|| {
join_context(|a| { barrier.wait(); a.migrated() },
|b| { barrier.wait(); b.migrated() })
});
assert!(!a_migrated);
assert!(b_migrated);
}
| 26.7 | 84 | 0.596442 |
72bb1bc187ce4e550d6299d861c539493def5fc1 | 5,272 | //! Raw handlers for input bytestreams.
use super::*;
use std::{
fmt::{Debug, Error as FormatError, Formatter},
fs::File,
io::{
BufReader,
Cursor,
Error as IoError,
ErrorKind as IoErrorKind,
Read,
Result as IoResult,
Seek,
SeekFrom,
},
result::Result as StdResult,
};
use streamcatcher::{Catcher, TxCatcher};
/// Usable data/byte sources for an audio stream.
///
/// Users may define their own data sources using [`Extension`]
/// and [`ExtensionSeek`].
///
/// [`Extension`]: Reader::Extension
/// [`ExtensionSeek`]: Reader::ExtensionSeek
pub enum Reader {
    /// Piped output of another program (e.g., [`ffmpeg`]).
///
/// Does not support seeking.
///
/// [`ffmpeg`]: super::ffmpeg
Pipe(BufReader<ChildContainer>),
/// A cached, raw in-memory store, provided by Songbird.
///
/// Supports seeking.
Memory(Catcher<Box<Reader>>),
/// A cached, Opus-compressed in-memory store, provided by Songbird.
///
/// Supports seeking.
Compressed(TxCatcher<Box<Input>, OpusCompressor>),
    /// A source which supports seeking by recreating its input stream.
///
/// Supports seeking.
Restartable(Restartable),
/// A source contained in a local file.
///
/// Supports seeking.
File(BufReader<File>),
/// A source contained as an array in memory.
///
/// Supports seeking.
Vec(Cursor<Vec<u8>>),
/// A basic user-provided source.
///
/// Does not support seeking.
Extension(Box<dyn Read + Send>),
/// A user-provided source which also implements [`Seek`].
///
/// Supports seeking.
///
/// [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
ExtensionSeek(Box<dyn ReadSeek + Send>),
}
impl Reader {
/// Returns whether the given source implements [`Seek`].
///
/// [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
pub fn is_seekable(&self) -> bool {
use Reader::*;
match self {
Restartable(_) | Compressed(_) | Memory(_) => true,
Extension(_) => false,
ExtensionSeek(_) => true,
_ => false,
}
}
#[allow(clippy::single_match)]
pub(crate) fn prep_with_handle(&mut self, handle: Handle) {
use Reader::*;
match self {
Restartable(r) => r.prep_with_handle(handle),
_ => {},
}
}
}
impl Read for Reader {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
use Reader::*;
match self {
Pipe(a) => Read::read(a, buffer),
Memory(a) => Read::read(a, buffer),
Compressed(a) => Read::read(a, buffer),
Restartable(a) => Read::read(a, buffer),
File(a) => Read::read(a, buffer),
Vec(a) => Read::read(a, buffer),
Extension(a) => a.read(buffer),
ExtensionSeek(a) => a.read(buffer),
}
}
}
impl Seek for Reader {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
use Reader::*;
match self {
Pipe(_) | Extension(_) => Err(IoError::new(
IoErrorKind::InvalidInput,
"Seeking not supported on Reader of this type.",
)),
Memory(a) => Seek::seek(a, pos),
Compressed(a) => Seek::seek(a, pos),
File(a) => Seek::seek(a, pos),
Restartable(a) => Seek::seek(a, pos),
Vec(a) => Seek::seek(a, pos),
ExtensionSeek(a) => a.seek(pos),
}
}
}
impl Debug for Reader {
fn fmt(&self, f: &mut Formatter<'_>) -> StdResult<(), FormatError> {
use Reader::*;
let field = match self {
Pipe(a) => format!("{:?}", a),
Memory(a) => format!("{:?}", a),
Compressed(a) => format!("{:?}", a),
Restartable(a) => format!("{:?}", a),
File(a) => format!("{:?}", a),
Vec(a) => format!("{:?}", a),
Extension(_) => "Extension".to_string(),
ExtensionSeek(_) => "ExtensionSeek".to_string(),
};
f.debug_tuple("Reader").field(&field).finish()
}
}
impl From<Vec<u8>> for Reader {
fn from(val: Vec<u8>) -> Reader {
Reader::Vec(Cursor::new(val))
}
}
/// Fusion trait for custom input sources which allow seeking.
pub trait ReadSeek {
/// See [`Read::read`].
///
/// [`Read::read`]: https://doc.rust-lang.org/nightly/std/io/trait.Read.html#tymethod.read
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize>;
/// See [`Seek::seek`].
///
/// [`Seek::seek`]: https://doc.rust-lang.org/nightly/std/io/trait.Seek.html#tymethod.seek
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64>;
}
impl Read for dyn ReadSeek {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
ReadSeek::read(self, buf)
}
}
impl Seek for dyn ReadSeek {
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
ReadSeek::seek(self, pos)
}
}
impl<R: Read + Seek> ReadSeek for R {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
Read::read(self, buf)
}
fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
Seek::seek(self, pos)
}
}
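// Illustrative check (added example, not part of the original file): thanks
// to the blanket impl above, any `Read + Seek` type such as an in-memory
// `Cursor` can back the seekable `Reader::ExtensionSeek` variant.
#[cfg(test)]
mod read_seek_tests {
    use super::Reader;
    use std::io::Cursor;

    #[test]
    fn cursor_backs_a_seekable_reader() {
        // A `Cursor<Vec<u8>>` is `Read + Seek + Send`, so it coerces into
        // `Box<dyn ReadSeek + Send>` via the blanket impl.
        let reader = Reader::ExtensionSeek(Box::new(Cursor::new(vec![1u8, 2, 3])));
        assert!(reader.is_seekable());
    }
}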
| 29.127072 | 94 | 0.539264 |
ccc70fab3f8aec7a5166bf01ccfa12d9c804719c | 480 | /// Build a b-tree map using a nice syntax like in other
/// languages.
#[macro_export]
macro_rules! map {
// handle the case w/ a trailing comma by removing the comma
($($key:expr => $val:expr),*,) => (
$crate::map!($($key => $val),*)
);
// construct a btree map
($($key:expr => $val:expr),*) => ({
#[allow(unused_mut)]
let mut b_map = ::std::collections::BTreeMap::new();
$(b_map.insert($key, $val);)*
b_map
});
}
| 28.235294 | 64 | 0.53125 |
cce8d95e997509d75beea08219a94515e1bdd4e3 | 5,721 | use crate::{node::Node, Config};
use anyhow::{bail, Context, Result};
use log::{debug, info, warn};
use std::{
env::{split_paths, var, var_os},
fmt::Display,
fs::{self, read_to_string},
net::Ipv4Addr,
path::{Path, PathBuf},
process::Command,
};
pub struct System {
hosts: Option<String>,
}
impl System {
/// Create a new system
pub fn setup(config: &Config) -> Result<Self> {
if Self::in_container()? {
info!("Skipping modprobe and sysctl for sake of containerization")
} else {
for module in &["overlay", "br_netfilter", "ip_conntrack"] {
Self::modprobe(module)?;
}
for sysctl in &[
"net.bridge.bridge-nf-call-ip6tables",
"net.bridge.bridge-nf-call-iptables",
"net.ipv4.conf.all.route_localnet",
"net.ipv4.ip_forward",
] {
Self::sysctl_enable(sysctl)?;
}
}
let hosts = if config.multi_node() {
// Try to write the hostnames, which does not work on every system
let hosts_file = Self::hosts();
let hosts = read_to_string(&hosts_file)?;
let local_hosts = (0..config.nodes())
.map(|x| format!("{} {}", Ipv4Addr::LOCALHOST, Node::raw(x)))
.collect::<Vec<_>>();
let mut new_hosts = hosts
.lines()
.filter(|x| !local_hosts.iter().any(|y| x == y))
.map(|x| x.into())
.collect::<Vec<_>>();
new_hosts.extend(local_hosts);
match fs::write(&hosts_file, new_hosts.join("\n")) {
Err(e) => {
warn!(
"Unable to write hosts file '{}'. The nodes may be not reachable: {}",
hosts_file.display(),
e
);
None
}
_ => Some(hosts),
}
} else {
None
};
Ok(Self { hosts })
}
/// Returns true if the process is running inside a container
pub fn in_container() -> Result<bool> {
Ok(
read_to_string(PathBuf::from("/").join("proc").join("1").join("cgroup"))
.context("Unable to retrieve systems container status")?
.lines()
.any(|x| x.contains("libpod") || x.contains("podman") || x.contains("docker")),
)
}
/// Restore the initial system state
pub fn cleanup(&self) {
if let Some(hosts) = &self.hosts {
if let Err(e) = fs::write(Self::hosts(), hosts) {
warn!(
"Unable to restore hosts file, may need manual cleanup: {}",
e
)
}
}
}
/// Find an executable inside the current $PATH environment
pub fn find_executable<P>(name: P) -> Result<PathBuf>
where
P: AsRef<Path> + Display,
{
var_os("PATH")
.and_then(|paths| {
split_paths(&paths)
.filter_map(|dir| {
let full_path = dir.join(&name);
if full_path.is_file() {
Some(full_path)
} else {
None
}
})
.next()
})
.with_context(|| format!("Unable to find executable '{}' in $PATH", name))
}
/// Return the full path to the default system shell
pub fn shell() -> Result<String> {
let shell = var("SHELL").unwrap_or_else(|_| "sh".into());
Ok(format!(
"{}",
Self::find_executable(&shell)
.with_context(|| format!("Unable to find system shell '{}'", shell))?
.display()
))
}
/// Load a single kernel module via 'modprobe'
fn modprobe(module: &str) -> Result<()> {
debug!("Loading kernel module '{}'", module);
let output = Command::new("modprobe").arg(module).output()?;
if !output.status.success() {
bail!(
"Unable to load '{}' kernel module: {}",
module,
String::from_utf8(output.stderr)?,
);
}
Ok(())
}
/// Enable a single sysctl by setting it to '1'
fn sysctl_enable(key: &str) -> Result<()> {
debug!("Enabling sysctl '{}'", key);
let enable_arg = format!("{}=1", key);
let output = Command::new("sysctl").arg("-w").arg(&enable_arg).output()?;
let stderr = String::from_utf8(output.stderr)?;
if !stderr.is_empty() {
bail!("Unable to set sysctl '{}': {}", enable_arg, stderr);
}
Ok(())
}
fn hosts() -> PathBuf {
PathBuf::from("/").join("etc").join("hosts")
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env::set_var;
const VALID_EXECUTABLE: &str = "runc";
const INVALID_EXECUTABLE: &str = "should-not-exist";
#[test]
fn module_failure() {
assert!(System::modprobe("invalid").is_err());
}
#[test]
fn sysctl_failure() {
assert!(System::sysctl_enable("invalid").is_err());
}
#[test]
fn find_executable_success() {
assert!(System::find_executable(VALID_EXECUTABLE).is_ok());
}
#[test]
fn find_executable_failure() {
assert!(System::find_executable(INVALID_EXECUTABLE).is_err());
}
#[test]
fn find_shell_success() {
set_var("SHELL", VALID_EXECUTABLE);
assert!(System::shell().is_ok());
}
}
| 30.430851 | 95 | 0.48121 |
11a52ecdc295a277b1c4e310406a0b702a539567 | 2,336 | const INPUT: &str = include_str!("../input/day03.txt");
pub(crate) fn day03_part1() -> u32 {
let numbers = parse(INPUT);
gamma_times_epsilon(numbers)
}
pub(crate) fn day03_part2() -> u32 {
let numbers = parse(INPUT);
reduce_numbers(numbers)
}
fn gamma_times_epsilon(numbers: Vec<Vec<bool>>) -> u32 {
let len = numbers[0].len();
let mut gamma = Vec::with_capacity(len);
for i in 0..len {
let (ones, zeroes) = count_ones_and_zeroes_at_index(&numbers, i);
gamma.push(ones >= zeroes);
}
let gamma = to_decimal(&gamma);
let epsilon = (1 << len) - 1 - gamma; // complement of gamma
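// e.g. with len = 5 and gamma = 0b10110 (22), epsilon = 0b01001 (9),
// as in the example test below (22 * 9)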
gamma * epsilon
}
fn count_ones_and_zeroes_at_index(numbers: &[Vec<bool>], i: usize) -> (usize, usize) {
let ones = numbers.iter().filter(|bits| bits[i]).count();
let zeroes = numbers.len() - ones;
(ones, zeroes)
}
fn reduce_numbers(numbers: Vec<Vec<bool>>) -> u32 {
let og_rating = reduce(numbers.clone(), |ones, zeroes| ones >= zeroes);
let cs_rating = reduce(numbers, |ones, zeroes| ones < zeroes);
og_rating * cs_rating
}
type Filter = fn(usize, usize) -> bool;
fn reduce(mut numbers: Vec<Vec<bool>>, wanted: Filter) -> u32 {
let mut i = 0;
while numbers.len() > 1 {
let (ones, zeroes) = count_ones_and_zeroes_at_index(&numbers, i);
numbers.retain(|bits| bits[i] == wanted(ones, zeroes));
i += 1;
}
to_decimal(&numbers[0])
}
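// Interpret the bits as a big-endian binary number, most significant bit
// first; e.g. [true, false, true] -> 0b101 == 5.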
fn to_decimal(bits: &[bool]) -> u32 {
bits.iter()
.map(|&is_one| if is_one { 1 } else { 0 })
.fold(0, |a, i| (a << 1) + i)
}
fn parse(input: &str) -> Vec<Vec<bool>> {
input
.trim()
.lines()
.map(|s| s.chars().map(|c| c == '1').collect::<Vec<_>>())
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
const EXAMPLE: &str = "\
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010";
#[test]
fn example1() {
let numbers = parse(EXAMPLE);
assert_eq!(22 * 9, gamma_times_epsilon(numbers));
}
#[test]
fn example2() {
let numbers = parse(EXAMPLE);
assert_eq!(23 * 10, reduce_numbers(numbers));
}
#[test]
fn part1() {
assert_eq!(day03_part1(), 284 * 3811);
}
#[test]
fn part2() {
assert_eq!(day03_part2(), 486 * 2784);
}
}
| 22.901961 | 86 | 0.578339 |
fc7caee04b179b3d464d3eb08e130af7ce01cb9a | 16,686 | //! Assorted functions shared by several assists.
pub(crate) mod suggest_name;
use std::ops;
use ast::TypeBoundsOwner;
use hir::{Adt, HasSource, Semantics};
use ide_db::{
helpers::{FamousDefs, SnippetCap},
RootDatabase,
};
use itertools::Itertools;
use stdx::format_to;
use syntax::{
ast::edit::AstNodeEdit,
ast::AttrsOwner,
ast::NameOwner,
ast::{self, edit, make, ArgListOwner, GenericParamsOwner},
ted, AstNode, Direction, SmolStr,
SyntaxKind::*,
SyntaxNode, TextSize, T,
};
use crate::{
assist_context::{AssistBuilder, AssistContext},
ast_transform::{self, AstTransform, QualifyPaths, SubstituteTypeParams},
};
pub(crate) fn unwrap_trivial_block(block: ast::BlockExpr) -> ast::Expr {
extract_trivial_expression(&block)
.filter(|expr| !expr.syntax().text().contains_char('\n'))
.unwrap_or_else(|| block.into())
}
pub fn extract_trivial_expression(block: &ast::BlockExpr) -> Option<ast::Expr> {
let has_anything_else = |thing: &SyntaxNode| -> bool {
let mut non_trivial_children =
block.syntax().children_with_tokens().filter(|it| match it.kind() {
WHITESPACE | T!['{'] | T!['}'] => false,
_ => it.as_node() != Some(thing),
});
non_trivial_children.next().is_some()
};
if let Some(expr) = block.tail_expr() {
if has_anything_else(expr.syntax()) {
return None;
}
return Some(expr);
}
// Unwrap `{ continue; }`
let (stmt,) = block.statements().next_tuple()?;
if let ast::Stmt::ExprStmt(expr_stmt) = stmt {
if has_anything_else(expr_stmt.syntax()) {
return None;
}
let expr = expr_stmt.expr()?;
match expr.syntax().kind() {
CONTINUE_EXPR | BREAK_EXPR | RETURN_EXPR => return Some(expr),
_ => (),
}
}
None
}
/// This method uses a heuristic to support test functions annotated with custom test attributes, such as
/// `#[test_case(...)]`, `#[tokio::test]` and similar.
/// A regular `#[test]` annotation is supported as well.
///
/// It may produce false positives: for example, `#[wasm_bindgen_test]` requires a different command to run the test,
/// but that is better than not having runnables for those tests at all.
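///
/// For example (illustrative), each of these attributes is picked up because
/// its path contains the substring "test":
///
/// ```ignore
/// #[test]
/// #[tokio::test]
/// #[test_case(42)]
/// fn check_it() { /* ... */ }
/// ```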
pub fn test_related_attribute(fn_def: &ast::Fn) -> Option<ast::Attr> {
fn_def.attrs().find_map(|attr| {
let path = attr.path()?;
if path.syntax().text().to_string().contains("test") {
Some(attr)
} else {
None
}
})
}
#[derive(Copy, Clone, PartialEq)]
pub enum DefaultMethods {
Only,
No,
}
pub fn filter_assoc_items(
db: &RootDatabase,
items: &[hir::AssocItem],
default_methods: DefaultMethods,
) -> Vec<ast::AssocItem> {
fn has_def_name(item: &ast::AssocItem) -> bool {
match item {
ast::AssocItem::Fn(def) => def.name(),
ast::AssocItem::TypeAlias(def) => def.name(),
ast::AssocItem::Const(def) => def.name(),
ast::AssocItem::MacroCall(_) => None,
}
.is_some()
}
items
.iter()
// Note: This throws away items with no source.
.filter_map(|i| {
let item = match i {
hir::AssocItem::Function(i) => ast::AssocItem::Fn(i.source(db)?.value),
hir::AssocItem::TypeAlias(i) => ast::AssocItem::TypeAlias(i.source(db)?.value),
hir::AssocItem::Const(i) => ast::AssocItem::Const(i.source(db)?.value),
};
Some(item)
})
.filter(has_def_name)
.filter(|it| match it {
ast::AssocItem::Fn(def) => matches!(
(default_methods, def.body()),
(DefaultMethods::Only, Some(_)) | (DefaultMethods::No, None)
),
_ => default_methods == DefaultMethods::No,
})
.collect::<Vec<_>>()
}
pub fn add_trait_assoc_items_to_impl(
sema: &hir::Semantics<ide_db::RootDatabase>,
items: Vec<ast::AssocItem>,
trait_: hir::Trait,
impl_: ast::Impl,
target_scope: hir::SemanticsScope,
) -> (ast::Impl, ast::AssocItem) {
let source_scope = sema.scope_for_def(trait_);
let ast_transform = QualifyPaths::new(&target_scope, &source_scope)
.or(SubstituteTypeParams::for_trait_impl(&source_scope, trait_, impl_.clone()));
let items = items
.into_iter()
.map(|it| it.clone_for_update())
.inspect(|it| ast_transform::apply(&*ast_transform, it))
.map(|it| edit::remove_attrs_and_docs(&it).clone_subtree().clone_for_update());
let res = impl_.clone_for_update();
let assoc_item_list = res.get_or_create_assoc_item_list();
let mut first_item = None;
for item in items {
first_item.get_or_insert_with(|| item.clone());
match &item {
ast::AssocItem::Fn(fn_) if fn_.body().is_none() => {
let body = make::block_expr(None, Some(make::ext::expr_todo()))
.indent(edit::IndentLevel(1));
ted::replace(fn_.get_or_create_body().syntax(), body.clone_for_update().syntax())
}
ast::AssocItem::TypeAlias(type_alias) => {
if let Some(type_bound_list) = type_alias.type_bound_list() {
type_bound_list.remove()
}
}
_ => {}
}
assoc_item_list.add_item(item)
}
(res, first_item.unwrap())
}
#[derive(Clone, Copy, Debug)]
pub(crate) enum Cursor<'a> {
Replace(&'a SyntaxNode),
Before(&'a SyntaxNode),
}
impl<'a> Cursor<'a> {
fn node(self) -> &'a SyntaxNode {
match self {
Cursor::Replace(node) | Cursor::Before(node) => node,
}
}
}
pub(crate) fn render_snippet(_cap: SnippetCap, node: &SyntaxNode, cursor: Cursor) -> String {
assert!(cursor.node().ancestors().any(|it| it == *node));
let range = cursor.node().text_range() - node.text_range().start();
let range: ops::Range<usize> = range.into();
let mut placeholder = cursor.node().to_string();
escape(&mut placeholder);
let tab_stop = match cursor {
Cursor::Replace(placeholder) => format!("${{0:{}}}", placeholder),
Cursor::Before(placeholder) => format!("$0{}", placeholder),
};
let mut buf = node.to_string();
buf.replace_range(range, &tab_stop);
return buf;
fn escape(buf: &mut String) {
stdx::replace(buf, '{', r"\{");
stdx::replace(buf, '}', r"\}");
stdx::replace(buf, '$', r"\$");
}
}
pub(crate) fn vis_offset(node: &SyntaxNode) -> TextSize {
node.children_with_tokens()
.find(|it| !matches!(it.kind(), WHITESPACE | COMMENT | ATTR))
.map(|it| it.text_range().start())
.unwrap_or_else(|| node.text_range().start())
}
pub(crate) fn invert_boolean_expression(
sema: &Semantics<RootDatabase>,
expr: ast::Expr,
) -> ast::Expr {
if let Some(expr) = invert_special_case(sema, &expr) {
return expr;
}
make::expr_prefix(T![!], expr)
}
fn invert_special_case(sema: &Semantics<RootDatabase>, expr: &ast::Expr) -> Option<ast::Expr> {
match expr {
ast::Expr::BinExpr(bin) => match bin.op_kind()? {
ast::BinOp::NegatedEqualityTest => bin.replace_op(T![==]).map(|it| it.into()),
ast::BinOp::EqualityTest => bin.replace_op(T![!=]).map(|it| it.into()),
// Swap `<` with `>=`, `<=` with `>`, ... if operands `impl Ord`
ast::BinOp::LesserTest if bin_impls_ord(sema, bin) => {
bin.replace_op(T![>=]).map(|it| it.into())
}
ast::BinOp::LesserEqualTest if bin_impls_ord(sema, bin) => {
bin.replace_op(T![>]).map(|it| it.into())
}
ast::BinOp::GreaterTest if bin_impls_ord(sema, bin) => {
bin.replace_op(T![<=]).map(|it| it.into())
}
ast::BinOp::GreaterEqualTest if bin_impls_ord(sema, bin) => {
bin.replace_op(T![<]).map(|it| it.into())
}
// Parenthesize other expressions before prefixing `!`
_ => Some(make::expr_prefix(T![!], make::expr_paren(expr.clone()))),
},
ast::Expr::MethodCallExpr(mce) => {
let receiver = mce.receiver()?;
let method = mce.name_ref()?;
let arg_list = mce.arg_list()?;
let method = match method.text().as_str() {
"is_some" => "is_none",
"is_none" => "is_some",
"is_ok" => "is_err",
"is_err" => "is_ok",
_ => return None,
};
Some(make::expr_method_call(receiver, method, arg_list))
}
ast::Expr::PrefixExpr(pe) if pe.op_kind()? == ast::PrefixOp::Not => {
if let ast::Expr::ParenExpr(parexpr) = pe.expr()? {
parexpr.expr()
} else {
pe.expr()
}
}
// FIXME:
// ast::Expr::Literal(true | false )
_ => None,
}
}
fn bin_impls_ord(sema: &Semantics<RootDatabase>, bin: &ast::BinExpr) -> bool {
match (
bin.lhs().and_then(|lhs| sema.type_of_expr(&lhs)),
bin.rhs().and_then(|rhs| sema.type_of_expr(&rhs)),
) {
(Some(lhs_ty), Some(rhs_ty)) if lhs_ty == rhs_ty => {
let krate = sema.scope(bin.syntax()).module().map(|it| it.krate());
let ord_trait = FamousDefs(sema, krate).core_cmp_Ord();
ord_trait.map_or(false, |ord_trait| {
lhs_ty.autoderef(sema.db).any(|ty| ty.impls_trait(sema.db, ord_trait, &[]))
})
}
_ => false,
}
}
pub(crate) fn next_prev() -> impl Iterator<Item = Direction> {
[Direction::Next, Direction::Prev].iter().copied()
}
pub(crate) fn does_pat_match_variant(pat: &ast::Pat, var: &ast::Pat) -> bool {
let first_node_text = |pat: &ast::Pat| pat.syntax().first_child().map(|node| node.text());
let pat_head = match pat {
ast::Pat::IdentPat(bind_pat) => {
if let Some(p) = bind_pat.pat() {
first_node_text(&p)
} else {
return pat.syntax().text() == var.syntax().text();
}
}
pat => first_node_text(pat),
};
let var_head = first_node_text(var);
pat_head == var_head
}
// Uses a syntax-driven approach to find any impl blocks for the struct that
// exist within the module/file
//
// Returns `None` if we've found an existing fn
//
// FIXME: change the new fn checking to a more semantic approach when that's more
// viable (e.g. we process proc macros, etc)
// FIXME: this partially overlaps with `find_impl_block_*`
pub(crate) fn find_struct_impl(
ctx: &AssistContext,
strukt: &ast::Adt,
name: &str,
) -> Option<Option<ast::Impl>> {
let db = ctx.db();
let module = strukt.syntax().ancestors().find(|node| {
ast::Module::can_cast(node.kind()) || ast::SourceFile::can_cast(node.kind())
})?;
let struct_def = match strukt {
ast::Adt::Enum(e) => Adt::Enum(ctx.sema.to_def(e)?),
ast::Adt::Struct(s) => Adt::Struct(ctx.sema.to_def(s)?),
ast::Adt::Union(u) => Adt::Union(ctx.sema.to_def(u)?),
};
let block = module.descendants().filter_map(ast::Impl::cast).find_map(|impl_blk| {
let blk = ctx.sema.to_def(&impl_blk)?;
// FIXME: handle e.g. `struct S<T>; impl<U> S<U> {}`
// (we currently use the wrong type parameter)
// also we wouldn't want to use e.g. `impl S<u32>`
let same_ty = match blk.self_ty(db).as_adt() {
Some(def) => def == struct_def,
None => false,
};
let not_trait_impl = blk.trait_(db).is_none();
if !(same_ty && not_trait_impl) {
None
} else {
Some(impl_blk)
}
});
if let Some(ref impl_blk) = block {
if has_fn(impl_blk, name) {
return None;
}
}
Some(block)
}
fn has_fn(imp: &ast::Impl, rhs_name: &str) -> bool {
if let Some(il) = imp.assoc_item_list() {
for item in il.assoc_items() {
if let ast::AssocItem::Fn(f) = item {
if let Some(name) = f.name() {
if name.text().eq_ignore_ascii_case(rhs_name) {
return true;
}
}
}
}
}
false
}
/// Find the start of the `impl` block for the given `ast::Impl`.
//
// FIXME: this partially overlaps with `find_struct_impl`
pub(crate) fn find_impl_block_start(impl_def: ast::Impl, buf: &mut String) -> Option<TextSize> {
buf.push('\n');
let start = impl_def.assoc_item_list().and_then(|it| it.l_curly_token())?.text_range().end();
Some(start)
}
/// Find the end of the `impl` block for the given `ast::Impl`.
//
// FIXME: this partially overlaps with `find_struct_impl`
pub(crate) fn find_impl_block_end(impl_def: ast::Impl, buf: &mut String) -> Option<TextSize> {
buf.push('\n');
let end = impl_def
.assoc_item_list()
.and_then(|it| it.r_curly_token())?
.prev_sibling_or_token()?
.text_range()
.end();
Some(end)
}
// Generates the surrounding `impl Type { <code> }` including type and lifetime
// parameters
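//
// For example (illustrative), for `struct Foo<'a, T: Clone>` this produces
// roughly `impl<'a, T: Clone> Foo<'a, T> { <code> }`, repeating any `where`
// clause from the type definition.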
pub(crate) fn generate_impl_text(adt: &ast::Adt, code: &str) -> String {
generate_impl_text_inner(adt, None, code)
}
// Generates the surrounding `impl <trait> for Type { <code> }` including type
// and lifetime parameters
pub(crate) fn generate_trait_impl_text(adt: &ast::Adt, trait_text: &str, code: &str) -> String {
generate_impl_text_inner(adt, Some(trait_text), code)
}
fn generate_impl_text_inner(adt: &ast::Adt, trait_text: Option<&str>, code: &str) -> String {
let generic_params = adt.generic_param_list();
let mut buf = String::with_capacity(code.len());
buf.push_str("\n\n");
adt.attrs()
.filter(|attr| attr.as_simple_call().map(|(name, _arg)| name == "cfg").unwrap_or(false))
.for_each(|attr| buf.push_str(format!("{}\n", attr.to_string()).as_str()));
buf.push_str("impl");
if let Some(generic_params) = &generic_params {
let lifetimes = generic_params.lifetime_params().map(|lt| format!("{}", lt.syntax()));
let type_params = generic_params.type_params().map(|type_param| {
let mut buf = String::new();
if let Some(it) = type_param.name() {
format_to!(buf, "{}", it.syntax());
}
if let Some(it) = type_param.colon_token() {
format_to!(buf, "{} ", it);
}
if let Some(it) = type_param.type_bound_list() {
format_to!(buf, "{}", it.syntax());
}
buf
});
let const_params = generic_params.const_params().map(|t| t.syntax().to_string());
let generics = lifetimes.chain(type_params).chain(const_params).format(", ");
format_to!(buf, "<{}>", generics);
}
buf.push(' ');
if let Some(trait_text) = trait_text {
buf.push_str(trait_text);
buf.push_str(" for ");
}
buf.push_str(&adt.name().unwrap().text());
if let Some(generic_params) = generic_params {
let lifetime_params = generic_params
.lifetime_params()
.filter_map(|it| it.lifetime())
.map(|it| SmolStr::from(it.text()));
let type_params = generic_params
.type_params()
.filter_map(|it| it.name())
.map(|it| SmolStr::from(it.text()));
let const_params = generic_params
.const_params()
.filter_map(|it| it.name())
.map(|it| SmolStr::from(it.text()));
format_to!(buf, "<{}>", lifetime_params.chain(type_params).chain(const_params).format(", "))
}
match adt.where_clause() {
Some(where_clause) => {
format_to!(buf, "\n{}\n{{\n{}\n}}", where_clause, code);
}
None => {
format_to!(buf, " {{\n{}\n}}", code);
}
}
buf
}
pub(crate) fn add_method_to_adt(
builder: &mut AssistBuilder,
adt: &ast::Adt,
impl_def: Option<ast::Impl>,
method: &str,
) {
let mut buf = String::with_capacity(method.len() + 2);
if impl_def.is_some() {
buf.push('\n');
}
buf.push_str(method);
let start_offset = impl_def
.and_then(|impl_def| find_impl_block_end(impl_def, &mut buf))
.unwrap_or_else(|| {
buf = generate_impl_text(&adt, &buf);
adt.syntax().text_range().end()
});
builder.insert(start_offset, buf);
}
| 33.573441 | 117 | 0.565444 |
7acaef03109672649e7d43e251f1efd4e5085255 | 10,959 | //! Provides utility functions for [ServerPlugin] and [ServerHandle] implementations that use Docker Compose.
//!
//! These functions all assume that the server has a dedicated directory, which contains a custom shell
//! script that wraps `docker-compose` with any setup, environment variables, etc. needed to run things
//! correctly for that FHIR server.
use super::ServerPluginWrapper;
use crate::servers::{ServerHandle, ServerName, ServerPlugin};
use crate::AppState;
use async_trait::async_trait;
use eyre::{eyre, Context, Result};
use std::ffi::OsStr;
use std::fmt::Debug;
use std::path::PathBuf;
use std::process::Command;
use std::process::Output;
use url::Url;
/// Each instance of this struct represents a particular FHIR Server implementation, where the implementation
/// is launched and managed via Docker Compose.
#[derive(Clone, Debug)]
pub struct DockerComposeServerPlugin {
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory:
fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder,
}
impl DockerComposeServerPlugin {
/// Returns the [PathBuf] to the `docker compose` wrapper script for this server.
fn server_script(&self) -> PathBuf {
self.server_script.clone()
}
/// Returns the base [Url] that the server will use, once launched.
fn base_url(&self) -> &Url {
&self.base_url
}
}
impl DockerComposeServerPlugin {
/// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implementation
/// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this
/// particular FHIR Server implementation
/// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched
/// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when
/// querying the FHIR Server, once launched
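///
/// A minimal construction sketch (script path and URL are illustrative):
///
/// ```ignore
/// let plugin = DockerComposeServerPlugin::new(
///     server_name,
///     PathBuf::from("server_plugins/example/docker_compose.sh"),
///     Url::parse("http://localhost:8080/fhir/").unwrap(),
///     |client, method, url| client.request(method, url),
/// );
/// ```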
pub fn new(
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory: fn(
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder,
) -> DockerComposeServerPlugin {
DockerComposeServerPlugin {
server_name,
server_script,
base_url,
request_builder_factory,
}
}
}
#[async_trait]
impl ServerPlugin for DockerComposeServerPlugin {
fn server_name(&self) -> &ServerName {
&self.server_name
}
async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> {
launch_server(app_state, self).await
}
}
/// Runs the specified Docker Compose subcommand with the specified arguments, for the specified FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run
/// the command for/against
/// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]`
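///
/// For example (as used below in `launch_server`):
/// `run_docker_compose(server_plugin, &["up", "--detach"])?;`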
#[tracing::instrument(level = "info", skip(server_plugin))]
fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output>
where
I: IntoIterator<Item = S> + Debug,
S: AsRef<OsStr>,
{
/*
* Build and launch the FHIR server.
*/
let docker_compose_output = Command::new(server_plugin.server_script())
.args(args)
.output()
.with_context(|| {
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
)
})?;
if !docker_compose_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_compose_output.status,
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_compose_output.stdout).into(),
String::from_utf8_lossy(&docker_compose_output.stderr).into()
)));
}
Ok(docker_compose_output)
}
/// Launches the server, producing a boxed [DockerComposeServerHandle].
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch
async fn launch_server(
app_state: &AppState,
server_plugin: &DockerComposeServerPlugin,
) -> Result<Box<dyn ServerHandle>> {
/*
* Build and launch the server.
*/
run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
/*
* The server containers have now been started, though they're not necessarily ready yet. Build a
* handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast
* the plugin, so this is the only way to have access to those fields from the handle).
*/
let server_plugin = app_state
.find_server_plugin(server_plugin.server_name().as_str())
.expect("Unable to find server plugin");
let http_client = super::client_default()?;
let server_handle = DockerComposeServerHandle {
server_plugin: server_plugin.clone(),
http_client,
};
// Wait (up to a timeout) for the server to be ready.
match wait_for_ready(app_state, &server_handle).await {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => {
let server_handle: Box<dyn ServerHandle> = Box::new(server_handle);
Ok(server_handle)
}
}
}
/// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout (five minutes, probing every 500 ms).
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_handle`: the [DockerComposeServerHandle] to test
///
/// Returns an empty [Result], where an error indicates that the server was not ready.
#[tracing::instrument(level = "debug", skip(app_state, server_handle))]
async fn wait_for_ready(
app_state: &AppState,
server_handle: &DockerComposeServerHandle,
) -> Result<()> {
let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async {
let mut ready = false;
let mut probe = None;
while !ready {
probe = Some(
crate::test_framework::metadata::check_metadata_operation(app_state, server_handle)
.await,
);
ready = probe.as_ref().expect("probe result missing").is_ok();
if !ready {
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}
probe.expect("probe results missing")
})
.await
.with_context(|| {
format!(
"Timed out while waiting for server '{}' to launch.",
server_handle.plugin().server_name()
)
})?;
match probe_result {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => Ok(()),
}
}
/// Represents a running instance of a [DockerComposeServerPlugin] instance.
struct DockerComposeServerHandle {
server_plugin: ServerPluginWrapper,
http_client: reqwest::Client,
}
#[async_trait]
impl ServerHandle for DockerComposeServerHandle {
fn plugin(&self) -> &ServerPluginWrapper {
&self.server_plugin
}
fn base_url(&self) -> url::Url {
let server_plugin = server_plugin_downcast(self);
server_plugin.base_url().clone()
}
fn client(&self) -> Result<reqwest::Client> {
Ok(self.http_client.clone())
}
fn request_builder(
&self,
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder {
let server_plugin = server_plugin_downcast(self);
(server_plugin.request_builder_factory)(client, method, url)
}
fn emit_logs(&self) -> Result<String> {
let server_plugin = server_plugin_downcast(self);
match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
}) {
Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()),
Err(err) => Err(err),
}
}
#[tracing::instrument(level = "debug", skip(self, app_state))]
async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> {
self.shutdown()?;
let server_plugin = server_plugin_downcast(self);
launch_server(app_state, server_plugin).await?;
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
fn shutdown(&self) -> Result<()> {
let server_plugin = server_plugin_downcast(self);
let docker_down_output =
run_docker_compose(server_plugin, &["down"]).with_context(|| {
format!(
"Running '{} down' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
if !docker_down_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_down_output.status,
format!(
"Failed to shutdown '{}' via Docker Compose.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_down_output.stdout).into(),
String::from_utf8_lossy(&docker_down_output.stderr).into()
)));
}
Ok(())
}
}
/// Extract the downcast [DockerComposeServerPlugin] from the specified [DockerComposeServerHandle].
fn server_plugin_downcast(server_handle: &DockerComposeServerHandle) -> &DockerComposeServerPlugin {
match &server_handle.server_plugin {
ServerPluginWrapper::DockerComposeServerPlugin(server_plugin) => server_plugin,
#[allow(unreachable_patterns)]
_ => panic!("Unsupported downcast attempt."),
}
}
| 34.790476 | 109 | 0.617575 |
9174a7749a15a6093a37d63eba47e11d5ee7e4be | 73,289 | use crate::heritage::{Context, Heritage};
use crate::input::{Print, Source, TemplateInput};
use crate::parser::{parse, Cond, CondTest, Expr, Loop, Node, Target, When, Whitespace, Ws};
use crate::{
filters, get_template_source, read_config_file, CompileError, Config, WhitespaceHandling,
};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::{cmp, hash, mem, str};
/// The actual implementation for askama_derive::Template
#[doc(hidden)]
pub fn derive_template(input: TokenStream) -> TokenStream {
let ast: syn::DeriveInput = syn::parse2(input).unwrap();
match build_template(&ast) {
Ok(source) => source.parse().unwrap(),
Err(e) => e.into_compile_error(),
}
}
/// Takes a `syn::DeriveInput` and generates source code for it
///
/// Reads the metadata from the `template()` attribute to get the template
/// metadata, then fetches the source from the filesystem. The source is
/// parsed, and the parse tree is fed to the code generator. Will print
/// the parse tree and/or generated source according to the `print` key's
/// value as passed to the `template()` attribute.
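///
/// For example (illustrative), a derive input like the following is accepted,
/// with metadata read from the `template()` attribute:
///
/// ```ignore
/// #[derive(Template)]
/// #[template(source = "Hello, {{ name }}!", ext = "txt")]
/// struct Hello<'a> {
///     name: &'a str,
/// }
/// ```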
fn build_template(ast: &syn::DeriveInput) -> Result<String, CompileError> {
let template_args = TemplateArgs::new(ast)?;
let config_toml = read_config_file(&template_args.config_path)?;
let config = Config::new(&config_toml)?;
let input = TemplateInput::new(ast, &config, template_args)?;
let source: String = match input.source {
Source::Source(ref s) => s.clone(),
Source::Path(_) => get_template_source(&input.path)?,
};
let mut sources = HashMap::new();
find_used_templates(&input, &mut sources, source)?;
let mut parsed = HashMap::new();
for (path, src) in &sources {
parsed.insert(path.as_path(), parse(src, input.syntax)?);
}
let mut contexts = HashMap::new();
for (path, nodes) in &parsed {
contexts.insert(*path, Context::new(input.config, path, nodes)?);
}
let ctx = &contexts[input.path.as_path()];
let heritage = if !ctx.blocks.is_empty() || ctx.extends.is_some() {
Some(Heritage::new(ctx, &contexts))
} else {
None
};
if input.print == Print::Ast || input.print == Print::All {
eprintln!("{:?}", parsed[input.path.as_path()]);
}
let code = Generator::new(
&input,
&contexts,
heritage.as_ref(),
MapChain::new(),
config.whitespace,
)
.build(&contexts[input.path.as_path()])?;
if input.print == Print::Code || input.print == Print::All {
eprintln!("{}", code);
}
Ok(code)
}
#[derive(Default)]
pub(crate) struct TemplateArgs {
pub(crate) source: Option<Source>,
pub(crate) print: Print,
pub(crate) escaping: Option<String>,
pub(crate) ext: Option<String>,
pub(crate) syntax: Option<String>,
pub(crate) config_path: Option<String>,
}
impl TemplateArgs {
fn new(ast: &'_ syn::DeriveInput) -> Result<Self, CompileError> {
// Check that an attribute called `template()` exists once and that it is
// the proper type (list).
let mut template_args = None;
for attr in &ast.attrs {
let ident = match attr.path.get_ident() {
Some(ident) => ident,
None => continue,
};
if ident == "template" {
if template_args.is_some() {
return Err("duplicated 'template' attribute".into());
}
match attr.parse_meta() {
Ok(syn::Meta::List(syn::MetaList { nested, .. })) => {
template_args = Some(nested);
}
Ok(_) => return Err("'template' attribute must be a list".into()),
Err(e) => return Err(format!("unable to parse attribute: {}", e).into()),
}
}
}
let template_args =
template_args.ok_or_else(|| CompileError::from("no attribute 'template' found"))?;
let mut args = Self::default();
// Loop over the meta attributes and find everything that we
// understand. Return a CompileError if something is not right.
// `source` contains an enum that can represent `path` or `source`.
for item in template_args {
let pair = match item {
syn::NestedMeta::Meta(syn::Meta::NameValue(ref pair)) => pair,
_ => {
return Err(format!(
"unsupported attribute argument {:?}",
item.to_token_stream()
)
.into())
}
};
let ident = match pair.path.get_ident() {
Some(ident) => ident,
None => unreachable!("not possible in syn::Meta::NameValue(…)"),
};
if ident == "path" {
if let syn::Lit::Str(ref s) = pair.lit {
if args.source.is_some() {
return Err("must specify 'source' or 'path', not both".into());
}
args.source = Some(Source::Path(s.value()));
} else {
return Err("template path must be string literal".into());
}
} else if ident == "source" {
if let syn::Lit::Str(ref s) = pair.lit {
if args.source.is_some() {
return Err("must specify 'source' or 'path', not both".into());
}
args.source = Some(Source::Source(s.value()));
} else {
return Err("template source must be string literal".into());
}
} else if ident == "print" {
if let syn::Lit::Str(ref s) = pair.lit {
args.print = s.value().parse()?;
} else {
return Err("print value must be string literal".into());
}
} else if ident == "escape" {
if let syn::Lit::Str(ref s) = pair.lit {
args.escaping = Some(s.value());
} else {
return Err("escape value must be string literal".into());
}
} else if ident == "ext" {
if let syn::Lit::Str(ref s) = pair.lit {
args.ext = Some(s.value());
} else {
return Err("ext value must be string literal".into());
}
} else if ident == "syntax" {
if let syn::Lit::Str(ref s) = pair.lit {
args.syntax = Some(s.value())
} else {
return Err("syntax value must be string literal".into());
}
} else if ident == "config" {
if let syn::Lit::Str(ref s) = pair.lit {
args.config_path = Some(s.value())
} else {
return Err("config value must be string literal".into());
}
} else {
return Err(format!("unsupported attribute key {:?} found", ident).into());
}
}
Ok(args)
}
}
fn find_used_templates(
input: &TemplateInput<'_>,
map: &mut HashMap<PathBuf, String>,
source: String,
) -> Result<(), CompileError> {
let mut dependency_graph = Vec::new();
let mut check = vec![(input.path.clone(), source)];
while let Some((path, source)) = check.pop() {
for n in parse(&source, input.syntax)? {
match n {
Node::Extends(Expr::StrLit(extends)) => {
let extends = input.config.find_template(extends, Some(&path))?;
let dependency_path = (path.clone(), extends.clone());
if dependency_graph.contains(&dependency_path) {
return Err(format!(
"cyclic dependecy in graph {:#?}",
dependency_graph
.iter()
.map(|e| format!("{:#?} --> {:#?}", e.0, e.1))
.collect::<Vec<String>>()
)
.into());
}
dependency_graph.push(dependency_path);
let source = get_template_source(&extends)?;
check.push((extends, source));
}
Node::Import(_, import, _) => {
let import = input.config.find_template(import, Some(&path))?;
let source = get_template_source(&import)?;
check.push((import, source));
}
_ => {}
}
}
map.insert(path, source);
}
Ok(())
}
struct Generator<'a, S: std::hash::BuildHasher> {
// The template input state: original struct AST and attributes
input: &'a TemplateInput<'a>,
// All contexts, keyed by the package-relative template path
contexts: &'a HashMap<&'a Path, Context<'a>, S>,
// The heritage contains references to blocks and their ancestry
heritage: Option<&'a Heritage<'a>>,
// Variables accessible directly from the current scope (not redirected to context)
locals: MapChain<'a, &'a str, LocalMeta>,
// Suffix whitespace from the previous literal. Will be flushed to the
// output buffer unless suppressed by whitespace suppression on the next
// non-literal.
next_ws: Option<&'a str>,
// Whitespace suppression from the previous non-literal. Will be used to
// determine whether to flush prefix whitespace from the next literal.
skip_ws: WhitespaceHandling,
// If currently in a block, this will contain the name of a potential parent block
super_block: Option<(&'a str, usize)>,
// buffer for writable
buf_writable: Vec<Writable<'a>>,
// Counter for write! hash named arguments
named: usize,
// If set to `suppress`, the whitespace characters will be removed by default unless `+` is
// used.
whitespace: WhitespaceHandling,
}
impl<'a, S: std::hash::BuildHasher> Generator<'a, S> {
fn new<'n>(
input: &'n TemplateInput<'_>,
contexts: &'n HashMap<&'n Path, Context<'n>, S>,
heritage: Option<&'n Heritage<'_>>,
locals: MapChain<'n, &'n str, LocalMeta>,
whitespace: WhitespaceHandling,
) -> Generator<'n, S> {
Generator {
input,
contexts,
heritage,
locals,
next_ws: None,
skip_ws: WhitespaceHandling::Preserve,
super_block: None,
buf_writable: vec![],
named: 0,
whitespace,
}
}
fn child(&mut self) -> Generator<'_, S> {
let locals = MapChain::with_parent(&self.locals);
Self::new(
self.input,
self.contexts,
self.heritage,
locals,
self.whitespace,
)
}
// Takes a Context and generates the relevant implementations.
fn build(mut self, ctx: &'a Context<'_>) -> Result<String, CompileError> {
let mut buf = Buffer::new(0);
if !ctx.blocks.is_empty() {
if let Some(parent) = self.input.parent {
self.deref_to_parent(&mut buf, parent)?;
}
};
self.impl_template(ctx, &mut buf)?;
self.impl_display(&mut buf)?;
#[cfg(feature = "actix-web")]
self.impl_actix_web_responder(&mut buf)?;
#[cfg(feature = "axum")]
self.impl_axum_into_response(&mut buf)?;
#[cfg(feature = "gotham")]
self.impl_gotham_into_response(&mut buf)?;
#[cfg(feature = "mendes")]
self.impl_mendes_responder(&mut buf)?;
#[cfg(feature = "rocket")]
self.impl_rocket_responder(&mut buf)?;
#[cfg(feature = "tide")]
self.impl_tide_integrations(&mut buf)?;
#[cfg(feature = "warp")]
self.impl_warp_reply(&mut buf)?;
Ok(buf.buf)
}
// Implement `Template` for the given context struct.
fn impl_template(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
) -> Result<(), CompileError> {
self.write_header(buf, "::askama::Template", None)?;
buf.writeln(
"fn render_into(&self, writer: &mut (impl ::std::fmt::Write + ?Sized)) -> \
::askama::Result<()> {",
)?;
// Make sure the compiler understands that the generated code depends on the template files.
for path in self.contexts.keys() {
// Skip the fake path of templates defined in rust source.
let path_is_valid = match self.input.source {
Source::Path(_) => true,
Source::Source(_) => path != &self.input.path,
};
if path_is_valid {
let path = path.to_str().unwrap();
buf.writeln(
"e! {
include_bytes!(#path);
}
.to_string(),
)?;
}
}
let size_hint = if let Some(heritage) = self.heritage {
self.handle(heritage.root, heritage.root.nodes, buf, AstLevel::Top)
} else {
self.handle(ctx, ctx.nodes, buf, AstLevel::Top)
}?;
self.flush_ws(Ws(None, None));
buf.writeln("::askama::Result::Ok(())")?;
buf.writeln("}")?;
buf.writeln("const EXTENSION: ::std::option::Option<&'static ::std::primitive::str> = ")?;
buf.writeln(&format!("{:?}", self.input.extension()))?;
buf.writeln(";")?;
buf.writeln("const SIZE_HINT: ::std::primitive::usize = ")?;
buf.writeln(&format!("{}", size_hint))?;
buf.writeln(";")?;
buf.writeln("const MIME_TYPE: &'static ::std::primitive::str = ")?;
buf.writeln(&format!("{:?}", &self.input.mime_type))?;
buf.writeln(";")?;
buf.writeln("}")?;
Ok(())
}
// Implement `Deref<Parent>` for an inheriting context struct.
fn deref_to_parent(
&mut self,
buf: &mut Buffer,
parent_type: &syn::Type,
) -> Result<(), CompileError> {
self.write_header(buf, "::std::ops::Deref", None)?;
buf.writeln(&format!(
"type Target = {};",
parent_type.into_token_stream()
))?;
buf.writeln("#[inline]")?;
buf.writeln("fn deref(&self) -> &Self::Target {")?;
buf.writeln("&self._parent")?;
buf.writeln("}")?;
buf.writeln("}")
}
// Implement `Display` for the given context struct.
fn impl_display(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
self.write_header(buf, "::std::fmt::Display", None)?;
buf.writeln("#[inline]")?;
buf.writeln("fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {")?;
buf.writeln("::askama::Template::render_into(self, f).map_err(|_| ::std::fmt::Error {})")?;
buf.writeln("}")?;
buf.writeln("}")
}
// Implement Actix-web's `Responder`.
#[cfg(feature = "actix-web")]
fn impl_actix_web_responder(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
self.write_header(buf, "::askama_actix::actix_web::Responder", None)?;
buf.writeln("type Body = ::askama_actix::actix_web::body::BoxBody;")?;
buf.writeln("#[inline]")?;
buf.writeln(
"fn respond_to(self, _req: &::askama_actix::actix_web::HttpRequest) \
-> ::askama_actix::actix_web::HttpResponse<Self::Body> {",
)?;
buf.writeln("<Self as ::askama_actix::TemplateToResponse>::to_response(&self)")?;
buf.writeln("}")?;
buf.writeln("}")
}
// Implement Axum's `IntoResponse`.
#[cfg(feature = "axum")]
fn impl_axum_into_response(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
self.write_header(buf, "::askama_axum::IntoResponse", None)?;
buf.writeln("#[inline]")?;
buf.writeln(
"fn into_response(self)\
-> ::askama_axum::Response {",
)?;
let ext = self.input.extension().unwrap_or("txt");
buf.writeln(&format!("::askama_axum::into_response(&self, {:?})", ext))?;
buf.writeln("}")?;
buf.writeln("}")
}
// Implement gotham's `IntoResponse`.
#[cfg(feature = "gotham")]
fn impl_gotham_into_response(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
self.write_header(buf, "::askama_gotham::IntoResponse", None)?;
buf.writeln("#[inline]")?;
buf.writeln(
"fn into_response(self, _state: &::askama_gotham::State)\
-> ::askama_gotham::Response<::askama_gotham::Body> {",
)?;
let ext = self.input.extension().unwrap_or("txt");
buf.writeln(&format!("::askama_gotham::respond(&self, {:?})", ext))?;
buf.writeln("}")?;
buf.writeln("}")
}
// Implement mendes' `Responder`.
#[cfg(feature = "mendes")]
fn impl_mendes_responder(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
let param = syn::parse_str("A: ::mendes::Application").unwrap();
let mut generics = self.input.ast.generics.clone();
generics.params.push(param);
let (_, orig_ty_generics, _) = self.input.ast.generics.split_for_impl();
let (impl_generics, _, where_clause) = generics.split_for_impl();
let mut where_clause = match where_clause {
Some(clause) => clause.clone(),
None => syn::WhereClause {
where_token: syn::Token![where](proc_macro2::Span::call_site()),
predicates: syn::punctuated::Punctuated::new(),
},
};
where_clause
.predicates
.push(syn::parse_str("A::ResponseBody: From<String>").unwrap());
where_clause
.predicates
.push(syn::parse_str("A::Error: From<::askama_mendes::Error>").unwrap());
buf.writeln(
format!(
"{} {} for {} {} {{",
quote!(impl#impl_generics),
"::mendes::application::IntoResponse<A>",
self.input.ast.ident,
quote!(#orig_ty_generics #where_clause),
)
.as_ref(),
)?;
buf.writeln(
"fn into_response(self, app: &A, req: &::mendes::http::request::Parts) \
-> ::mendes::http::Response<A::ResponseBody> {",
)?;
buf.writeln(&format!(
"::askama_mendes::into_response(app, req, &self, {:?})",
self.input.extension()
))?;
buf.writeln("}")?;
buf.writeln("}")?;
Ok(())
}
// Implement Rocket's `Responder`.
#[cfg(feature = "rocket")]
fn impl_rocket_responder(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
let lifetime = syn::Lifetime::new("'askama", proc_macro2::Span::call_site());
let param = syn::GenericParam::Lifetime(syn::LifetimeDef::new(lifetime));
self.write_header(
buf,
"::askama_rocket::Responder<'askama>",
Some(vec![param]),
)?;
buf.writeln("#[inline]")?;
buf.writeln(
"fn respond_to(self, _: &::askama_rocket::Request) \
-> ::askama_rocket::Result<'askama> {",
)?;
let ext = self.input.extension().unwrap_or("txt");
buf.writeln(&format!("::askama_rocket::respond(&self, {:?})", ext))?;
buf.writeln("}")?;
buf.writeln("}")?;
Ok(())
}
#[cfg(feature = "tide")]
fn impl_tide_integrations(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
let ext = self.input.extension().unwrap_or("txt");
self.write_header(
buf,
"::std::convert::TryInto<::askama_tide::tide::Body>",
None,
)?;
buf.writeln(
"type Error = ::askama_tide::askama::Error;\n\
#[inline]\n\
fn try_into(self) -> ::askama_tide::askama::Result<::askama_tide::tide::Body> {",
)?;
buf.writeln(&format!("::askama_tide::try_into_body(&self, {:?})", &ext))?;
buf.writeln("}")?;
buf.writeln("}")?;
buf.writeln("#[allow(clippy::from_over_into)]")?;
self.write_header(buf, "Into<::askama_tide::tide::Response>", None)?;
buf.writeln("#[inline]")?;
buf.writeln("fn into(self) -> ::askama_tide::tide::Response {")?;
buf.writeln(&format!("::askama_tide::into_response(&self, {:?})", ext))?;
buf.writeln("}\n}")
}
#[cfg(feature = "warp")]
fn impl_warp_reply(&mut self, buf: &mut Buffer) -> Result<(), CompileError> {
self.write_header(buf, "::askama_warp::warp::reply::Reply", None)?;
buf.writeln("#[inline]")?;
buf.writeln("fn into_response(self) -> ::askama_warp::warp::reply::Response {")?;
let ext = self.input.extension().unwrap_or("txt");
buf.writeln(&format!("::askama_warp::reply(&self, {:?})", ext))?;
buf.writeln("}")?;
buf.writeln("}")
}
// Writes header for the `impl` for `TraitFromPathName` or `Template`
// for the given context struct.
fn write_header(
&mut self,
buf: &mut Buffer,
target: &str,
params: Option<Vec<syn::GenericParam>>,
) -> Result<(), CompileError> {
let mut generics = self.input.ast.generics.clone();
if let Some(params) = params {
for param in params {
generics.params.push(param);
}
}
let (_, orig_ty_generics, _) = self.input.ast.generics.split_for_impl();
let (impl_generics, _, where_clause) = generics.split_for_impl();
buf.writeln(
format!(
"{} {} for {}{} {{",
quote!(impl#impl_generics),
target,
self.input.ast.ident,
quote!(#orig_ty_generics #where_clause),
)
.as_ref(),
)
}
/* Helper methods for handling node types */
fn handle(
&mut self,
ctx: &'a Context<'_>,
nodes: &'a [Node<'_>],
buf: &mut Buffer,
level: AstLevel,
) -> Result<usize, CompileError> {
let mut size_hint = 0;
for n in nodes {
match *n {
Node::Lit(lws, val, rws) => {
self.visit_lit(lws, val, rws);
}
Node::Comment(ws) => {
self.write_comment(ws);
}
Node::Expr(ws, ref val) => {
self.write_expr(ws, val);
}
Node::LetDecl(ws, ref var) => {
self.write_let_decl(buf, ws, var)?;
}
Node::Let(ws, ref var, ref val) => {
self.write_let(buf, ws, var, val)?;
}
Node::Cond(ref conds, ws) => {
self.write_cond(ctx, buf, conds, ws)?;
}
Node::Match(ws1, ref expr, ref arms, ws2) => {
self.write_match(ctx, buf, ws1, expr, arms, ws2)?;
}
Node::Loop(ref loop_block) => {
self.write_loop(ctx, buf, loop_block)?;
}
Node::BlockDef(ws1, name, _, ws2) => {
self.write_block(buf, Some(name), Ws(ws1.0, ws2.1))?;
}
Node::Include(ws, path) => {
size_hint += self.handle_include(ctx, buf, ws, path)?;
}
Node::Call(ws, scope, name, ref args) => {
size_hint += self.write_call(ctx, buf, ws, scope, name, args)?;
}
Node::Macro(_, ref m) => {
if level != AstLevel::Top {
return Err("macro blocks only allowed at the top level".into());
}
self.flush_ws(m.ws1);
self.prepare_ws(m.ws2);
}
Node::Raw(ws1, lws, val, rws, ws2) => {
self.handle_ws(ws1);
self.visit_lit(lws, val, rws);
self.handle_ws(ws2);
}
Node::Import(ws, _, _) => {
if level != AstLevel::Top {
return Err("import blocks only allowed at the top level".into());
}
self.handle_ws(ws);
}
Node::Extends(_) => {
if level != AstLevel::Top {
return Err("extend blocks only allowed at the top level".into());
}
// No whitespace handling: child template top-level is not used,
// except for the blocks defined in it.
}
Node::Break(ws) => {
self.handle_ws(ws);
self.write_buf_writable(buf)?;
buf.writeln("break;")?;
}
Node::Continue(ws) => {
self.handle_ws(ws);
self.write_buf_writable(buf)?;
buf.writeln("continue;")?;
}
}
}
if AstLevel::Top == level {
size_hint += self.write_buf_writable(buf)?;
}
Ok(size_hint)
}
fn write_cond(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
conds: &'a [Cond<'_>],
ws: Ws,
) -> Result<usize, CompileError> {
let mut flushed = 0;
let mut arm_sizes = Vec::new();
let mut has_else = false;
for (i, &(cws, ref cond, ref nodes)) in conds.iter().enumerate() {
self.handle_ws(cws);
flushed += self.write_buf_writable(buf)?;
if i > 0 {
self.locals.pop();
}
self.locals.push();
let mut arm_size = 0;
if let Some(CondTest { target, expr }) = cond {
if i == 0 {
buf.write("if ");
} else {
buf.dedent()?;
buf.write("} else if ");
}
if let Some(target) = target {
let mut expr_buf = Buffer::new(0);
self.visit_expr(&mut expr_buf, expr)?;
buf.write("let ");
self.visit_target(buf, true, true, target);
buf.write(" = &(");
buf.write(&expr_buf.buf);
buf.write(")");
} else {
// The following syntax `*(&(...) as &bool)` is used to
// trigger Rust's automatic dereferencing, to coerce
// e.g. `&&&&&bool` to `bool`. First `&(...) as &bool`
// coerces e.g. `&&&bool` to `&bool`. Then `*(&bool)`
// finally dereferences it to `bool`.
buf.write("*(&(");
let expr_code = self.visit_expr_root(expr)?;
buf.write(&expr_code);
buf.write(") as &bool)");
}
} else {
buf.dedent()?;
buf.write("} else");
has_else = true;
}
buf.writeln(" {")?;
arm_size += self.handle(ctx, nodes, buf, AstLevel::Nested)?;
arm_sizes.push(arm_size);
}
self.handle_ws(ws);
flushed += self.write_buf_writable(buf)?;
buf.writeln("}")?;
self.locals.pop();
if !has_else {
arm_sizes.push(0);
}
Ok(flushed + median(&mut arm_sizes))
}
#[allow(clippy::too_many_arguments)]
fn write_match(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
ws1: Ws,
expr: &Expr<'_>,
arms: &'a [When<'_>],
ws2: Ws,
) -> Result<usize, CompileError> {
self.flush_ws(ws1);
let flushed = self.write_buf_writable(buf)?;
let mut arm_sizes = Vec::new();
let expr_code = self.visit_expr_root(expr)?;
buf.writeln(&format!("match &{} {{", expr_code))?;
let mut arm_size = 0;
for (i, arm) in arms.iter().enumerate() {
let &(ws, ref target, ref body) = arm;
self.handle_ws(ws);
if i > 0 {
arm_sizes.push(arm_size + self.write_buf_writable(buf)?);
buf.writeln("}")?;
self.locals.pop();
}
self.locals.push();
self.visit_target(buf, true, true, target);
buf.writeln(" => {")?;
arm_size = self.handle(ctx, body, buf, AstLevel::Nested)?;
}
self.handle_ws(ws2);
arm_sizes.push(arm_size + self.write_buf_writable(buf)?);
buf.writeln("}")?;
self.locals.pop();
buf.writeln("}")?;
Ok(flushed + median(&mut arm_sizes))
}
#[allow(clippy::too_many_arguments)]
fn write_loop(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
loop_block: &'a Loop<'_>,
) -> Result<usize, CompileError> {
self.handle_ws(loop_block.ws1);
self.locals.push();
let expr_code = self.visit_expr_root(&loop_block.iter)?;
let flushed = self.write_buf_writable(buf)?;
buf.writeln("{")?;
buf.writeln("let mut _did_loop = false;")?;
match loop_block.iter {
Expr::Range(_, _, _) => buf.writeln(&format!("let _iter = {};", expr_code)),
Expr::Array(..) => buf.writeln(&format!("let _iter = {}.iter();", expr_code)),
// If `iter` is a call then we assume it's something that returns
// an iterator. If not then the user can explicitly add the needed
// call without issues.
Expr::Call(..) | Expr::Index(..) => {
buf.writeln(&format!("let _iter = ({}).into_iter();", expr_code))
}
// If accessing `self` then it most likely needs to be
// borrowed, to prevent an attempt of moving.
_ if expr_code.starts_with("self.") => {
buf.writeln(&format!("let _iter = (&{}).into_iter();", expr_code))
}
// If accessing a field then it most likely needs to be
// borrowed, to prevent an attempt of moving.
Expr::Attr(..) => buf.writeln(&format!("let _iter = (&{}).into_iter();", expr_code)),
// Otherwise, we borrow `iter` assuming that it implements `IntoIterator`.
_ => buf.writeln(&format!("let _iter = ({}).into_iter();", expr_code)),
}?;
if let Some(cond) = &loop_block.cond {
self.locals.push();
buf.write("let _iter = _iter.filter(|");
self.visit_target(buf, true, true, &loop_block.var);
buf.write("| -> bool {");
self.visit_expr(buf, cond)?;
buf.writeln("});")?;
self.locals.pop();
}
self.locals.push();
buf.write("for (");
self.visit_target(buf, true, true, &loop_block.var);
buf.writeln(", _loop_item) in ::askama::helpers::TemplateLoop::new(_iter) {")?;
buf.writeln("_did_loop = true;")?;
let mut size_hint1 = self.handle(ctx, &loop_block.body, buf, AstLevel::Nested)?;
self.handle_ws(loop_block.ws2);
size_hint1 += self.write_buf_writable(buf)?;
self.locals.pop();
buf.writeln("}")?;
buf.writeln("if !_did_loop {")?;
self.locals.push();
let mut size_hint2 = self.handle(ctx, &loop_block.else_block, buf, AstLevel::Nested)?;
self.handle_ws(loop_block.ws3);
size_hint2 += self.write_buf_writable(buf)?;
self.locals.pop();
buf.writeln("}")?;
buf.writeln("}")?;
Ok(flushed + ((size_hint1 * 3) + size_hint2) / 2)
}
fn write_call(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
ws: Ws,
scope: Option<&str>,
name: &str,
args: &[Expr<'_>],
) -> Result<usize, CompileError> {
if name == "super" {
return self.write_block(buf, None, ws);
}
let (def, own_ctx) = match scope {
Some(s) => {
let path = ctx.imports.get(s).ok_or_else(|| {
CompileError::from(format!("no import found for scope {:?}", s))
})?;
let mctx = self.contexts.get(path.as_path()).ok_or_else(|| {
CompileError::from(format!("context for {:?} not found", path))
})?;
let def = mctx.macros.get(name).ok_or_else(|| {
CompileError::from(format!("macro {:?} not found in scope {:?}", name, s))
})?;
(def, mctx)
}
None => {
let def = ctx
.macros
.get(name)
.ok_or_else(|| CompileError::from(format!("macro {:?} not found", name)))?;
(def, ctx)
}
};
self.flush_ws(ws); // Cannot handle_ws() here: whitespace from macro definition comes first
self.locals.push();
self.write_buf_writable(buf)?;
buf.writeln("{")?;
self.prepare_ws(def.ws1);
let mut names = Buffer::new(0);
let mut values = Buffer::new(0);
let mut is_first_variable = true;
for (i, arg) in def.args.iter().enumerate() {
let expr = args.get(i).ok_or_else(|| {
CompileError::from(format!("macro {:?} takes more than {} arguments", name, i))
})?;
match expr {
// If `expr` is already a form of variable then
// don't reintroduce a new variable. This is
// to avoid moving non-copyable values.
Expr::Var(name) => {
let var = self.locals.resolve_or_self(name);
self.locals.insert(arg, LocalMeta::with_ref(var));
}
Expr::Attr(obj, attr) => {
let mut attr_buf = Buffer::new(0);
self.visit_attr(&mut attr_buf, obj, attr)?;
let var = self.locals.resolve(&attr_buf.buf).unwrap_or(attr_buf.buf);
self.locals.insert(arg, LocalMeta::with_ref(var));
}
// Everything else still needs to become variables,
// to avoid having the same logic be executed
// multiple times, e.g. in the case of macro
// parameters being used multiple times.
_ => {
if is_first_variable {
is_first_variable = false
} else {
names.write(", ");
values.write(", ");
}
names.write(arg);
values.write("(");
values.write(&self.visit_expr_root(expr)?);
values.write(")");
self.locals.insert_with_default(arg);
}
}
}
debug_assert_eq!(names.buf.is_empty(), values.buf.is_empty());
if !names.buf.is_empty() {
buf.writeln(&format!("let ({}) = ({});", names.buf, values.buf))?;
}
let mut size_hint = self.handle(own_ctx, &def.nodes, buf, AstLevel::Nested)?;
self.flush_ws(def.ws2);
size_hint += self.write_buf_writable(buf)?;
buf.writeln("}")?;
self.locals.pop();
self.prepare_ws(ws);
Ok(size_hint)
}
fn handle_include(
&mut self,
ctx: &'a Context<'_>,
buf: &mut Buffer,
ws: Ws,
path: &str,
) -> Result<usize, CompileError> {
self.flush_ws(ws);
self.write_buf_writable(buf)?;
let path = self
.input
.config
.find_template(path, Some(&self.input.path))?;
let src = get_template_source(&path)?;
let nodes = parse(&src, self.input.syntax)?;
// Make sure the compiler understands that the generated code depends on the template file.
{
let path = path.to_str().unwrap();
buf.writeln(
"e! {
include_bytes!(#path);
}
.to_string(),
)?;
}
let size_hint = {
// Since nodes must not outlive the Generator, we instantiate
// a nested Generator here to handle the include's nodes.
let mut gen = self.child();
let mut size_hint = gen.handle(ctx, &nodes, buf, AstLevel::Nested)?;
size_hint += gen.write_buf_writable(buf)?;
size_hint
};
self.prepare_ws(ws);
Ok(size_hint)
}
fn write_let_decl(
&mut self,
buf: &mut Buffer,
ws: Ws,
var: &'a Target<'_>,
) -> Result<(), CompileError> {
self.handle_ws(ws);
self.write_buf_writable(buf)?;
buf.write("let ");
self.visit_target(buf, false, true, var);
buf.writeln(";")
}
fn is_shadowing_variable(&self, var: &Target<'a>) -> Result<bool, CompileError> {
match var {
Target::Name(name) => {
let name = normalize_identifier(name);
match self.locals.get(&name) {
// declares a new variable
None => Ok(false),
// an initialized variable gets shadowed
Some(meta) if meta.initialized => Ok(true),
// initializes a variable that was introduced in a LetDecl before
_ => Ok(false),
}
}
Target::Tuple(_, targets) => {
for target in targets {
match self.is_shadowing_variable(target) {
Ok(false) => continue,
outcome => return outcome,
}
}
Ok(false)
}
Target::Struct(_, named_targets) => {
for (_, target) in named_targets {
match self.is_shadowing_variable(target) {
Ok(false) => continue,
outcome => return outcome,
}
}
Ok(false)
}
_ => Err("literals are not allowed on the left-hand side of an assignment".into()),
}
}
fn write_let(
&mut self,
buf: &mut Buffer,
ws: Ws,
var: &'a Target<'_>,
val: &Expr<'_>,
) -> Result<(), CompileError> {
self.handle_ws(ws);
let mut expr_buf = Buffer::new(0);
self.visit_expr(&mut expr_buf, val)?;
let shadowed = self.is_shadowing_variable(var)?;
if shadowed {
// Need to flush the buffer if the variable is being shadowed,
// to ensure the old variable is used.
self.write_buf_writable(buf)?;
}
if shadowed
|| !matches!(var, &Target::Name(_))
|| matches!(var, Target::Name(name) if self.locals.get(name).is_none())
{
buf.write("let ");
}
self.visit_target(buf, true, true, var);
buf.writeln(&format!(" = {};", &expr_buf.buf))
}
// If `name` is `Some`, this is a call to a block definition, and we have to find
// the first block for that name from the ancestry chain. If name is `None`, this
// is from a `super()` call, and we can get the name from `self.super_block`.
fn write_block(
&mut self,
buf: &mut Buffer,
name: Option<&'a str>,
outer: Ws,
) -> Result<usize, CompileError> {
// Flush preceding whitespace according to the outer WS spec
self.flush_ws(outer);
let prev_block = self.super_block;
let cur = match (name, prev_block) {
// The top-level context contains a block definition
(Some(cur_name), None) => (cur_name, 0),
// A block definition contains a block definition of the same name
(Some(cur_name), Some((prev_name, _))) if cur_name == prev_name => {
return Err(format!("cannot define recursive blocks ({})", cur_name).into());
}
// A block definition contains a definition of another block
(Some(cur_name), Some((_, _))) => (cur_name, 0),
// `super()` was called inside a block
(None, Some((prev_name, gen))) => (prev_name, gen + 1),
// `super()` is called from outside a block
(None, None) => return Err("cannot call 'super()' outside block".into()),
};
self.super_block = Some(cur);
// Get the block definition from the heritage chain
let heritage = self
.heritage
.as_ref()
.ok_or_else(|| CompileError::from("no block ancestors available"))?;
let (ctx, def) = heritage.blocks[cur.0].get(cur.1).ok_or_else(|| {
CompileError::from(match name {
None => format!("no super() block found for block '{}'", cur.0),
Some(name) => format!("no block found for name '{}'", name),
})
})?;
// Get the nodes and whitespace suppression data from the block definition
let (ws1, nodes, ws2) = if let Node::BlockDef(ws1, _, nodes, ws2) = def {
(ws1, nodes, ws2)
} else {
unreachable!()
};
// Handle inner whitespace suppression spec and process block nodes
self.prepare_ws(*ws1);
self.locals.push();
let size_hint = self.handle(ctx, nodes, buf, AstLevel::Block)?;
if !self.locals.is_current_empty() {
// Need to flush the buffer before popping the variable stack
self.write_buf_writable(buf)?;
}
self.locals.pop();
self.flush_ws(*ws2);
// Restore original block context and set whitespace suppression for
// succeeding whitespace according to the outer WS spec
self.super_block = prev_block;
self.prepare_ws(outer);
Ok(size_hint)
}
fn write_expr(&mut self, ws: Ws, s: &'a Expr<'a>) {
self.handle_ws(ws);
self.buf_writable.push(Writable::Expr(s));
}
// Write expression buffer and empty
fn write_buf_writable(&mut self, buf: &mut Buffer) -> Result<usize, CompileError> {
if self.buf_writable.is_empty() {
return Ok(0);
}
if self
.buf_writable
.iter()
.all(|w| matches!(w, Writable::Lit(_)))
{
let mut buf_lit = Buffer::new(0);
for s in mem::take(&mut self.buf_writable) {
if let Writable::Lit(s) = s {
buf_lit.write(s);
};
}
buf.writeln(&format!("writer.write_str({:#?})?;", &buf_lit.buf))?;
return Ok(buf_lit.buf.len());
}
let mut size_hint = 0;
let mut buf_format = Buffer::new(0);
let mut buf_expr = Buffer::new(buf.indent + 1);
let mut expr_cache = HashMap::with_capacity(self.buf_writable.len());
for s in mem::take(&mut self.buf_writable) {
match s {
Writable::Lit(s) => {
buf_format.write(&s.replace('{', "{{").replace('}', "}}"));
size_hint += s.len();
}
Writable::Expr(s) => {
use self::DisplayWrap::*;
let mut expr_buf = Buffer::new(0);
let wrapped = self.visit_expr(&mut expr_buf, s)?;
let expression = match wrapped {
Wrapped => expr_buf.buf,
Unwrapped => format!(
"::askama::MarkupDisplay::new_unsafe(&({}), {})",
expr_buf.buf, self.input.escaper
),
};
use std::collections::hash_map::Entry;
let id = match expr_cache.entry(expression.clone()) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
let id = self.named;
self.named += 1;
buf_expr.write(&format!("expr{} = ", id));
buf_expr.write("&");
buf_expr.write(&expression);
buf_expr.writeln(",")?;
e.insert(id);
id
}
};
buf_format.write(&format!("{{expr{}}}", id));
size_hint += 3;
}
}
}
buf.writeln("::std::write!(")?;
buf.indent();
buf.writeln("writer,")?;
buf.writeln(&format!("{:#?},", &buf_format.buf))?;
buf.writeln(buf_expr.buf.trim())?;
buf.dedent()?;
buf.writeln(")?;")?;
Ok(size_hint)
}
fn visit_lit(&mut self, lws: &'a str, val: &'a str, rws: &'a str) {
assert!(self.next_ws.is_none());
if !lws.is_empty() {
match self.skip_ws {
WhitespaceHandling::Suppress => {
self.skip_ws = WhitespaceHandling::Preserve;
}
_ if val.is_empty() => {
assert!(rws.is_empty());
self.next_ws = Some(lws);
}
WhitespaceHandling::Preserve => self.buf_writable.push(Writable::Lit(lws)),
WhitespaceHandling::Minimize => {
self.buf_writable
.push(Writable::Lit(match lws.contains('\n') {
true => "\n",
false => " ",
}))
}
}
}
if !val.is_empty() {
self.buf_writable.push(Writable::Lit(val));
}
if !rws.is_empty() {
self.next_ws = Some(rws);
}
}
fn write_comment(&mut self, ws: Ws) {
self.handle_ws(ws);
}
/* Visitor methods for expression types */
fn visit_expr_root(&mut self, expr: &Expr<'_>) -> Result<String, CompileError> {
let mut buf = Buffer::new(0);
self.visit_expr(&mut buf, expr)?;
Ok(buf.buf)
}
fn visit_expr(
&mut self,
buf: &mut Buffer,
expr: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
Ok(match *expr {
Expr::BoolLit(s) => self.visit_bool_lit(buf, s),
Expr::NumLit(s) => self.visit_num_lit(buf, s),
Expr::StrLit(s) => self.visit_str_lit(buf, s),
Expr::CharLit(s) => self.visit_char_lit(buf, s),
Expr::Var(s) => self.visit_var(buf, s),
Expr::Path(ref path) => self.visit_path(buf, path),
Expr::Array(ref elements) => self.visit_array(buf, elements)?,
Expr::Attr(ref obj, name) => self.visit_attr(buf, obj, name)?,
Expr::Index(ref obj, ref key) => self.visit_index(buf, obj, key)?,
Expr::Filter(name, ref args) => self.visit_filter(buf, name, args)?,
Expr::Unary(op, ref inner) => self.visit_unary(buf, op, inner)?,
Expr::BinOp(op, ref left, ref right) => self.visit_binop(buf, op, left, right)?,
Expr::Range(op, ref left, ref right) => self.visit_range(buf, op, left, right)?,
Expr::Group(ref inner) => self.visit_group(buf, inner)?,
Expr::Call(ref obj, ref args) => self.visit_call(buf, obj, args)?,
Expr::RustMacro(name, args) => self.visit_rust_macro(buf, name, args),
Expr::Try(ref expr) => self.visit_try(buf, expr.as_ref())?,
Expr::Tuple(ref exprs) => self.visit_tuple(buf, exprs)?,
})
}
fn visit_try(
&mut self,
buf: &mut Buffer,
expr: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
buf.write("::core::result::Result::map_err(");
self.visit_expr(buf, expr)?;
buf.write(", |err| ::askama::shared::Error::Custom(::core::convert::Into::into(err)))?");
Ok(DisplayWrap::Unwrapped)
}
fn visit_rust_macro(&mut self, buf: &mut Buffer, name: &str, args: &str) -> DisplayWrap {
buf.write(name);
buf.write("!(");
buf.write(args);
buf.write(")");
DisplayWrap::Unwrapped
}
#[cfg(not(feature = "markdown"))]
fn _visit_markdown_filter(
&mut self,
_buf: &mut Buffer,
_args: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
Err("the `markdown` filter requires the `markdown` feature to be enabled".into())
}
#[cfg(feature = "markdown")]
fn _visit_markdown_filter(
&mut self,
buf: &mut Buffer,
args: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
let (md, options) = match args {
[md] => (md, None),
[md, options] => (md, Some(options)),
_ => return Err("markdown filter expects no more than one option argument".into()),
};
buf.write(&format!(
"::askama::filters::markdown({}, ",
self.input.escaper
));
self.visit_expr(buf, md)?;
match options {
Some(options) => {
buf.write(", ::core::option::Option::Some(");
self.visit_expr(buf, options)?;
buf.write(")");
}
None => buf.write(", ::core::option::Option::None"),
}
buf.write(")?");
Ok(DisplayWrap::Wrapped)
}
fn visit_filter(
&mut self,
buf: &mut Buffer,
mut name: &str,
args: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
if matches!(name, "escape" | "e") {
self._visit_escape_filter(buf, args)?;
return Ok(DisplayWrap::Wrapped);
} else if name == "format" {
self._visit_format_filter(buf, args)?;
return Ok(DisplayWrap::Unwrapped);
} else if name == "fmt" {
self._visit_fmt_filter(buf, args)?;
return Ok(DisplayWrap::Unwrapped);
} else if name == "join" {
self._visit_join_filter(buf, args)?;
return Ok(DisplayWrap::Unwrapped);
} else if name == "markdown" {
return self._visit_markdown_filter(buf, args);
}
if name == "tojson" {
name = "json";
}
#[cfg(not(feature = "json"))]
if name == "json" {
return Err("the `json` filter requires the `serde-json` feature to be enabled".into());
}
#[cfg(not(feature = "yaml"))]
if name == "yaml" {
return Err("the `yaml` filter requires the `serde-yaml` feature to be enabled".into());
}
const FILTERS: [&str; 2] = ["safe", "yaml"];
if FILTERS.contains(&name) {
buf.write(&format!(
"::askama::filters::{}({}, ",
name, self.input.escaper
));
} else if filters::BUILT_IN_FILTERS.contains(&name) {
buf.write(&format!("::askama::filters::{}(", name));
} else {
buf.write(&format!("filters::{}(", name));
}
self._visit_args(buf, args)?;
buf.write(")?");
Ok(match FILTERS.contains(&name) {
true => DisplayWrap::Wrapped,
false => DisplayWrap::Unwrapped,
})
}
fn _visit_escape_filter(
&mut self,
buf: &mut Buffer,
args: &[Expr<'_>],
) -> Result<(), CompileError> {
if args.len() > 2 {
return Err("only two arguments allowed to escape filter".into());
}
let opt_escaper = match args.get(1) {
Some(Expr::StrLit(name)) => Some(*name),
Some(_) => return Err("invalid escaper type for escape filter".into()),
None => None,
};
let escaper = match opt_escaper {
Some(name) => self
.input
.config
.escapers
.iter()
.find_map(|(escapers, escaper)| escapers.contains(name).then(|| escaper))
.ok_or_else(|| CompileError::from("invalid escaper for escape filter"))?,
None => self.input.escaper,
};
buf.write("::askama::filters::escape(");
buf.write(escaper);
buf.write(", ");
self._visit_args(buf, &args[..1])?;
buf.write(")?");
Ok(())
}
fn _visit_format_filter(
&mut self,
buf: &mut Buffer,
args: &[Expr<'_>],
) -> Result<(), CompileError> {
buf.write("format!(");
if let Some(Expr::StrLit(v)) = args.first() {
self.visit_str_lit(buf, v);
if args.len() > 1 {
buf.write(", ");
}
} else {
return Err("invalid expression type for format filter".into());
}
self._visit_args(buf, &args[1..])?;
buf.write(")");
Ok(())
}
fn _visit_fmt_filter(
&mut self,
buf: &mut Buffer,
args: &[Expr<'_>],
) -> Result<(), CompileError> {
buf.write("format!(");
if let Some(Expr::StrLit(v)) = args.get(1) {
self.visit_str_lit(buf, v);
buf.write(", ");
} else {
return Err("invalid expression type for fmt filter".into());
}
self._visit_args(buf, &args[0..1])?;
if args.len() > 2 {
return Err("only two arguments allowed to fmt filter".into());
}
buf.write(")");
Ok(())
}
// Force type coercion on first argument to `join` filter (see #39).
fn _visit_join_filter(
&mut self,
buf: &mut Buffer,
args: &[Expr<'_>],
) -> Result<(), CompileError> {
buf.write("::askama::filters::join((&");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buf.write(", &");
}
self.visit_expr(buf, arg)?;
if i == 0 {
buf.write(").into_iter()");
}
}
buf.write(")?");
Ok(())
}
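    // Illustration (hypothetical template, not from this crate's tests): for
    // `{{ items|join(", ") }}` the loop above emits roughly
    // `::askama::filters::join((&self.items).into_iter(), &", ")?`, so the
    // first argument is coerced into an iterator before joining.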
fn _visit_args(&mut self, buf: &mut Buffer, args: &[Expr<'_>]) -> Result<(), CompileError> {
if args.is_empty() {
return Ok(());
}
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buf.write(", ");
}
let borrow = !arg.is_copyable();
if borrow {
buf.write("&(");
}
match arg {
Expr::Call(left, _) if !matches!(left.as_ref(), Expr::Path(_)) => {
buf.writeln("{")?;
self.visit_expr(buf, arg)?;
buf.writeln("}")?;
}
_ => {
self.visit_expr(buf, arg)?;
}
}
if borrow {
buf.write(")");
}
}
Ok(())
}
fn visit_attr(
&mut self,
buf: &mut Buffer,
obj: &Expr<'_>,
attr: &str,
) -> Result<DisplayWrap, CompileError> {
if let Expr::Var(name) = *obj {
if name == "loop" {
if attr == "index" {
buf.write("(_loop_item.index + 1)");
return Ok(DisplayWrap::Unwrapped);
} else if attr == "index0" {
buf.write("_loop_item.index");
return Ok(DisplayWrap::Unwrapped);
} else if attr == "first" {
buf.write("_loop_item.first");
return Ok(DisplayWrap::Unwrapped);
} else if attr == "last" {
buf.write("_loop_item.last");
return Ok(DisplayWrap::Unwrapped);
} else {
return Err("unknown loop variable".into());
}
}
}
self.visit_expr(buf, obj)?;
buf.write(&format!(".{}", normalize_identifier(attr)));
Ok(DisplayWrap::Unwrapped)
}
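    // E.g. `{{ loop.index0 }}` inside a for-loop body renders as
    // `_loop_item.index`, while `{{ loop.index }}` renders as
    // `(_loop_item.index + 1)`.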
fn visit_index(
&mut self,
buf: &mut Buffer,
obj: &Expr<'_>,
key: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
buf.write("&");
self.visit_expr(buf, obj)?;
buf.write("[");
self.visit_expr(buf, key)?;
buf.write("]");
Ok(DisplayWrap::Unwrapped)
}
fn visit_call(
&mut self,
buf: &mut Buffer,
left: &Expr<'_>,
args: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
match left {
Expr::Attr(left, method) if **left == Expr::Var("loop") => match *method {
"cycle" => match args {
[arg] => {
if matches!(arg, Expr::Array(arr) if arr.is_empty()) {
return Err("loop.cycle(…) cannot use an empty array".into());
}
buf.write("({");
buf.write("let _cycle = &(");
self.visit_expr(buf, arg)?;
buf.writeln(");")?;
buf.writeln("let _len = _cycle.len();")?;
buf.writeln("if _len == 0 {")?;
buf.writeln("return ::core::result::Result::Err(::askama::Error::Fmt(::core::fmt::Error));")?;
buf.writeln("}")?;
buf.writeln("_cycle[_loop_item.index % _len]")?;
buf.writeln("})")?;
}
_ => return Err("loop.cycle(…) expects exactly one argument".into()),
},
s => return Err(format!("unknown loop method: {:?}", s).into()),
},
left => {
match left {
Expr::Var(name) => match self.locals.resolve(name) {
Some(resolved) => buf.write(&resolved),
None => buf.write(&format!("(&self.{})", normalize_identifier(name))),
},
left => {
self.visit_expr(buf, left)?;
}
}
buf.write("(");
self._visit_args(buf, args)?;
buf.write(")");
}
}
Ok(DisplayWrap::Unwrapped)
}
fn visit_unary(
&mut self,
buf: &mut Buffer,
op: &str,
inner: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
buf.write(op);
self.visit_expr(buf, inner)?;
Ok(DisplayWrap::Unwrapped)
}
fn visit_range(
&mut self,
buf: &mut Buffer,
op: &str,
left: &Option<Box<Expr<'_>>>,
right: &Option<Box<Expr<'_>>>,
) -> Result<DisplayWrap, CompileError> {
if let Some(left) = left {
self.visit_expr(buf, left)?;
}
buf.write(op);
if let Some(right) = right {
self.visit_expr(buf, right)?;
}
Ok(DisplayWrap::Unwrapped)
}
fn visit_binop(
&mut self,
buf: &mut Buffer,
op: &str,
left: &Expr<'_>,
right: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
self.visit_expr(buf, left)?;
buf.write(&format!(" {} ", op));
self.visit_expr(buf, right)?;
Ok(DisplayWrap::Unwrapped)
}
fn visit_group(
&mut self,
buf: &mut Buffer,
inner: &Expr<'_>,
) -> Result<DisplayWrap, CompileError> {
buf.write("(");
self.visit_expr(buf, inner)?;
buf.write(")");
Ok(DisplayWrap::Unwrapped)
}
fn visit_tuple(
&mut self,
buf: &mut Buffer,
exprs: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
buf.write("(");
for (index, expr) in exprs.iter().enumerate() {
if index > 0 {
buf.write(" ");
}
self.visit_expr(buf, expr)?;
buf.write(",");
}
buf.write(")");
Ok(DisplayWrap::Unwrapped)
}
fn visit_array(
&mut self,
buf: &mut Buffer,
elements: &[Expr<'_>],
) -> Result<DisplayWrap, CompileError> {
buf.write("[");
for (i, el) in elements.iter().enumerate() {
if i > 0 {
buf.write(", ");
}
self.visit_expr(buf, el)?;
}
buf.write("]");
Ok(DisplayWrap::Unwrapped)
}
fn visit_path(&mut self, buf: &mut Buffer, path: &[&str]) -> DisplayWrap {
for (i, part) in path.iter().enumerate() {
if i > 0 {
buf.write("::");
}
buf.write(part);
}
DisplayWrap::Unwrapped
}
fn visit_var(&mut self, buf: &mut Buffer, s: &str) -> DisplayWrap {
if s == "self" {
buf.write(s);
return DisplayWrap::Unwrapped;
}
buf.write(normalize_identifier(&self.locals.resolve_or_self(s)));
DisplayWrap::Unwrapped
}
fn visit_bool_lit(&mut self, buf: &mut Buffer, s: &str) -> DisplayWrap {
buf.write(s);
DisplayWrap::Unwrapped
}
fn visit_str_lit(&mut self, buf: &mut Buffer, s: &str) -> DisplayWrap {
buf.write(&format!("\"{}\"", s));
DisplayWrap::Unwrapped
}
fn visit_char_lit(&mut self, buf: &mut Buffer, s: &str) -> DisplayWrap {
buf.write(&format!("'{}'", s));
DisplayWrap::Unwrapped
}
fn visit_num_lit(&mut self, buf: &mut Buffer, s: &str) -> DisplayWrap {
buf.write(s);
DisplayWrap::Unwrapped
}
fn visit_target(
&mut self,
buf: &mut Buffer,
initialized: bool,
first_level: bool,
target: &Target<'a>,
) {
match target {
Target::Name("_") => {
buf.write("_");
}
Target::Name(name) => {
let name = normalize_identifier(name);
match initialized {
true => self.locals.insert(name, LocalMeta::initialized()),
false => self.locals.insert_with_default(name),
}
buf.write(name);
}
Target::Tuple(path, targets) => {
buf.write(&path.join("::"));
buf.write("(");
for target in targets {
self.visit_target(buf, initialized, false, target);
buf.write(",");
}
buf.write(")");
}
Target::Struct(path, targets) => {
buf.write(&path.join("::"));
buf.write(" { ");
for (name, target) in targets {
buf.write(normalize_identifier(name));
buf.write(": ");
self.visit_target(buf, initialized, false, target);
buf.write(",");
}
buf.write(" }");
}
Target::Path(path) => {
self.visit_path(buf, path);
}
Target::StrLit(s) => {
if first_level {
buf.write("&");
}
self.visit_str_lit(buf, s);
}
Target::NumLit(s) => {
if first_level {
buf.write("&");
}
self.visit_num_lit(buf, s);
}
Target::CharLit(s) => {
if first_level {
buf.write("&");
}
self.visit_char_lit(buf, s);
}
Target::BoolLit(s) => {
if first_level {
buf.write("&");
}
buf.write(s);
}
}
}
/* Helper methods for dealing with whitespace nodes */
// Combines `flush_ws()` and `prepare_ws()` to handle both trailing whitespace from the
// preceding literal and leading whitespace from the succeeding literal.
fn handle_ws(&mut self, ws: Ws) {
self.flush_ws(ws);
self.prepare_ws(ws);
}
fn should_trim_ws(&self, ws: Option<Whitespace>) -> WhitespaceHandling {
match ws {
Some(Whitespace::Suppress) => WhitespaceHandling::Suppress,
Some(Whitespace::Preserve) => WhitespaceHandling::Preserve,
Some(Whitespace::Minimize) => WhitespaceHandling::Minimize,
None => self.whitespace,
}
}
    // If the previous literal left some trailing whitespace in `next_ws`, flush
    // that whitespace according to the prefix whitespace suppressor of the given
    // argument. In either case, `next_ws` is reset to `None` (no trailing whitespace).
fn flush_ws(&mut self, ws: Ws) {
if self.next_ws.is_none() {
return;
}
// If `whitespace` is set to `suppress`, we keep the whitespace characters only if there is
// a `+` character.
match self.should_trim_ws(ws.0) {
WhitespaceHandling::Preserve => {
let val = self.next_ws.unwrap();
if !val.is_empty() {
self.buf_writable.push(Writable::Lit(val));
}
}
WhitespaceHandling::Minimize => {
let val = self.next_ws.unwrap();
if !val.is_empty() {
self.buf_writable
.push(Writable::Lit(match val.contains('\n') {
true => "\n",
false => " ",
}));
}
}
WhitespaceHandling::Suppress => {}
}
self.next_ws = None;
}
// Sets `skip_ws` to match the suffix whitespace suppressor from the given
// argument, to determine whether to suppress leading whitespace from the
// next literal.
fn prepare_ws(&mut self, ws: Ws) {
self.skip_ws = self.should_trim_ws(ws.1);
}
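    // Illustrative sketch (assuming the parser maps `-` to
    // `Whitespace::Suppress` and `+` to `Whitespace::Preserve`): for a
    // fragment `a {%- if x +%} b`, the tag carries
    // `Ws(Some(Whitespace::Suppress), Some(Whitespace::Preserve))`, so
    // `flush_ws` drops the whitespace after `a` while `prepare_ws` keeps
    // the whitespace before `b`.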
}
struct Buffer {
// The buffer to generate the code into
buf: String,
// The current level of indentation (in spaces)
indent: u8,
// Whether the output buffer is currently at the start of a line
start: bool,
}
impl Buffer {
fn new(indent: u8) -> Self {
Self {
buf: String::new(),
indent,
start: true,
}
}
fn writeln(&mut self, s: &str) -> Result<(), CompileError> {
if s == "}" {
self.dedent()?;
}
if !s.is_empty() {
self.write(s);
}
self.buf.push('\n');
if s.ends_with('{') {
self.indent();
}
self.start = true;
Ok(())
}
fn write(&mut self, s: &str) {
if self.start {
for _ in 0..(self.indent * 4) {
self.buf.push(' ');
}
self.start = false;
}
self.buf.push_str(s);
}
fn indent(&mut self) {
self.indent += 1;
}
fn dedent(&mut self) -> Result<(), CompileError> {
if self.indent == 0 {
return Err("dedent() called while indentation == 0".into());
}
self.indent -= 1;
Ok(())
}
}
#[derive(Clone, Default)]
struct LocalMeta {
refs: Option<String>,
initialized: bool,
}
impl LocalMeta {
fn initialized() -> Self {
Self {
refs: None,
initialized: true,
}
}
fn with_ref(refs: String) -> Self {
Self {
refs: Some(refs),
initialized: true,
}
}
}
// type SetChain<'a, T> = MapChain<'a, T, ()>;
#[derive(Debug)]
struct MapChain<'a, K, V>
where
K: cmp::Eq + hash::Hash,
{
parent: Option<&'a MapChain<'a, K, V>>,
scopes: Vec<HashMap<K, V>>,
}
impl<'a, K: 'a, V: 'a> MapChain<'a, K, V>
where
K: cmp::Eq + hash::Hash,
{
fn new() -> MapChain<'a, K, V> {
MapChain {
parent: None,
scopes: vec![HashMap::new()],
}
}
fn with_parent<'p>(parent: &'p MapChain<'_, K, V>) -> MapChain<'p, K, V> {
MapChain {
parent: Some(parent),
scopes: vec![HashMap::new()],
}
}
    /// Iterates the scopes in reverse (innermost first) and returns the value
    /// from the first scope where `key` exists, falling back to the parent chain.
fn get(&self, key: &K) -> Option<&V> {
let scopes = self.scopes.iter().rev();
scopes
.filter_map(|set| set.get(key))
.next()
.or_else(|| self.parent.and_then(|set| set.get(key)))
}
fn is_current_empty(&self) -> bool {
self.scopes.last().unwrap().is_empty()
}
fn insert(&mut self, key: K, val: V) {
self.scopes.last_mut().unwrap().insert(key, val);
        // Note that if `insert` returns `Some`, an identifier was reused.
        // E.g. `{% macro f(a, a) %}` or `{% let (a, a) = ... %}` produce a
        // generated template that fails to compile with an error along the
        // lines of "identifier `a` used more than once".
}
fn insert_with_default(&mut self, key: K)
where
V: Default,
{
self.insert(key, V::default());
}
fn push(&mut self) {
self.scopes.push(HashMap::new());
}
fn pop(&mut self) {
self.scopes.pop().unwrap();
assert!(!self.scopes.is_empty());
}
}
impl MapChain<'_, &str, LocalMeta> {
fn resolve(&self, name: &str) -> Option<String> {
let name = normalize_identifier(name);
self.get(&name).map(|meta| match &meta.refs {
Some(expr) => expr.clone(),
None => name.to_string(),
})
}
fn resolve_or_self(&self, name: &str) -> String {
let name = normalize_identifier(name);
self.resolve(name)
.unwrap_or_else(|| format!("self.{}", name))
}
}
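// Minimal usage sketch (illustrative only; mirrors how the generator pushes
// and pops scopes around blocks and loops):
//
//     let mut locals = MapChain::new();
//     locals.insert("x", LocalMeta::initialized());
//     locals.push();
//     locals.insert("x", LocalMeta::with_ref("_loop_item.x".into()));
//     assert_eq!(locals.resolve("x").as_deref(), Some("_loop_item.x"));
//     locals.pop();
//     assert_eq!(locals.resolve("x").as_deref(), Some("x"));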
fn median(sizes: &mut [usize]) -> usize {
sizes.sort_unstable();
if sizes.len() % 2 == 1 {
sizes[sizes.len() / 2]
} else {
(sizes[sizes.len() / 2 - 1] + sizes[sizes.len() / 2]) / 2
}
}
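// Example: `median(&mut [3, 1, 2])` sorts in place and returns `2`;
// `median(&mut [1, 2, 3, 4])` averages the middle pair with integer
// division, returning `2`.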
#[derive(Clone, PartialEq)]
enum AstLevel {
Top,
Block,
Nested,
}
impl Copy for AstLevel {}
#[derive(Clone)]
enum DisplayWrap {
Wrapped,
Unwrapped,
}
impl Copy for DisplayWrap {}
#[derive(Debug)]
enum Writable<'a> {
Lit(&'a str),
Expr(&'a Expr<'a>),
}
// Identifiers to be replaced with raw identifiers, so as to avoid
// collisions between template syntax and Rust's syntax. In particular
// [Rust keywords](https://doc.rust-lang.org/reference/keywords.html)
// should be replaced, since they're not reserved words in Askama
// syntax but have a high probability of causing problems in the
// generated code.
//
// This list excludes the Rust keywords *self*, *Self*, and *super*
// because they are not allowed to be raw identifiers, and *loop*
// because it's used like a keyword in the template language.
static USE_RAW: [(&str, &str); 47] = [
("as", "r#as"),
("break", "r#break"),
("const", "r#const"),
("continue", "r#continue"),
("crate", "r#crate"),
("else", "r#else"),
("enum", "r#enum"),
("extern", "r#extern"),
("false", "r#false"),
("fn", "r#fn"),
("for", "r#for"),
("if", "r#if"),
("impl", "r#impl"),
("in", "r#in"),
("let", "r#let"),
("match", "r#match"),
("mod", "r#mod"),
("move", "r#move"),
("mut", "r#mut"),
("pub", "r#pub"),
("ref", "r#ref"),
("return", "r#return"),
("static", "r#static"),
("struct", "r#struct"),
("trait", "r#trait"),
("true", "r#true"),
("type", "r#type"),
("unsafe", "r#unsafe"),
("use", "r#use"),
("where", "r#where"),
("while", "r#while"),
("async", "r#async"),
("await", "r#await"),
("dyn", "r#dyn"),
("abstract", "r#abstract"),
("become", "r#become"),
("box", "r#box"),
("do", "r#do"),
("final", "r#final"),
("macro", "r#macro"),
("override", "r#override"),
("priv", "r#priv"),
("typeof", "r#typeof"),
("unsized", "r#unsized"),
("virtual", "r#virtual"),
("yield", "r#yield"),
("try", "r#try"),
];
fn normalize_identifier(ident: &str) -> &str {
if let Some(word) = USE_RAW.iter().find(|x| x.0 == ident) {
word.1
} else {
ident
}
}
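// Example: `normalize_identifier("type")` yields `"r#type"`, while a
// non-keyword such as `normalize_identifier("title")` is returned unchanged.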
| 34.183302 | 118 | 0.487454 |
901cb79495225570097afb181f78842c0ff956f7 | 2,563 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::JOFR2 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct JOFFSET2R {
bits: u16,
}
impl JOFFSET2R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _JOFFSET2W<'a> {
w: &'a mut W,
}
impl<'a> _JOFFSET2W<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:11 - Data offset for injected channel x"]
#[inline]
pub fn joffset2(&self) -> JOFFSET2R {
let bits = {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
JOFFSET2R { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:11 - Data offset for injected channel x"]
#[inline]
pub fn joffset2(&mut self) -> _JOFFSET2W {
_JOFFSET2W { w: self }
}
}
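// Usage sketch (names are illustrative; assumes a peripheral handle `adc1`
// exposing this register): a read-modify-write of the 12-bit offset field.
//
//     adc1.jofr2.modify(|_, w| unsafe { w.joffset2().bits(0x0FF) });
//     let offset = adc1.jofr2.read().joffset2().bits();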
| 24.179245 | 61 | 0.504097 |
91e2f2e7cd022b5de71394c326c34638fcbd3ebe | 4,836 | use nom::error::{ContextError, ErrorKind, FromExternalError, ParseError};
use oro_diagnostics::{Diagnostic, DiagnosticCategory};
use oro_node_semver::SemverError;
use thiserror::Error;
use url::ParseError as UrlParseError;
#[derive(Debug, Error)]
#[error("Error parsing package spec. {kind}")]
pub struct PackageSpecError {
pub input: String,
pub offset: usize,
pub kind: SpecErrorKind,
}
impl PackageSpecError {
pub fn location(&self) -> (usize, usize) {
// Taken partially from nom.
let prefix = &self.input.as_bytes()[..self.offset];
// Count the number of newlines in the first `offset` bytes of input
let line_number = bytecount::count(prefix, b'\n');
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| self.offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = self.input[line_begin..]
.lines()
.next()
.unwrap_or(&self.input[line_begin..])
.trim_end();
        // The (0-indexed) column number is the byte offset of our substring into that line
let column_number = self.input[self.offset..].as_ptr() as usize - line.as_ptr() as usize;
(line_number, column_number)
}
}
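// Worked example (sketch): with `input = "foo\nbar"` and `offset = 5` (the
// `a` in `bar`), `location()` returns `(1, 1)`: one newline precedes the
// offset, and the offset sits one byte into that line (both 0-indexed).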
#[derive(Debug, Error)]
pub enum SpecErrorKind {
#[error("Found invalid characters: `{0}`")]
InvalidCharacters(String),
#[error("Drive letters on Windows can only be alphabetical. Got `{0}`.")]
InvalidDriveLetter(char),
#[error("Invalid git host `{0}`. Only github:, gitlab:, gist:, and bitbucket: are supported in shorthands.")]
InvalidGitHost(String),
#[error(transparent)]
SemverParseError(SemverError),
#[error(transparent)]
UrlParseError(UrlParseError),
#[error(transparent)]
GitHostParseError(Box<PackageSpecError>),
#[error("Failed to parse {0} component of semver string.")]
Context(&'static str),
#[error("Incomplete input to semver parser.")]
IncompleteInput,
#[error("An unspecified error occurred.")]
Other,
}
impl Diagnostic for PackageSpecError {
fn category(&self) -> DiagnosticCategory {
let (row, col) = self.location();
DiagnosticCategory::Parse {
input: self.input.clone(),
path: None,
row,
col,
}
}
fn subpath(&self) -> String {
// TODO: add more detail
"package_spec".into()
}
fn advice(&self) -> Option<String> {
// TODO: please fix this
Some("Please fix your spec. Go look up wherever they're documented idk.".into())
}
}
#[derive(Debug)]
pub(crate) struct SpecParseError<I> {
pub(crate) input: I,
pub(crate) context: Option<&'static str>,
pub(crate) kind: Option<SpecErrorKind>,
}
impl<I> ParseError<I> for SpecParseError<I> {
fn from_error_kind(input: I, _kind: nom::error::ErrorKind) -> Self {
Self {
input,
context: None,
kind: None,
}
}
fn append(_input: I, _kind: nom::error::ErrorKind, other: Self) -> Self {
other
}
}
impl<I> ContextError<I> for SpecParseError<I> {
fn add_context(_input: I, ctx: &'static str, mut other: Self) -> Self {
other.context = Some(ctx);
other
}
}
// There's a few parsers that just... manually return SpecParseError in a
// map_res, so this absurd thing is actually needed. Curious? Just comment it
// out and look at all the red.
impl<'a> FromExternalError<&'a str, SpecParseError<&'a str>> for SpecParseError<&'a str> {
fn from_external_error(_input: &'a str, _kind: ErrorKind, e: SpecParseError<&'a str>) -> Self {
e
}
}
impl<'a> FromExternalError<&'a str, SemverError> for SpecParseError<&'a str> {
fn from_external_error(input: &'a str, _kind: ErrorKind, e: SemverError) -> Self {
SpecParseError {
input,
context: None,
kind: Some(SpecErrorKind::SemverParseError(e)),
}
}
}
impl<'a> FromExternalError<&'a str, UrlParseError> for SpecParseError<&'a str> {
fn from_external_error(input: &'a str, _kind: ErrorKind, e: UrlParseError) -> Self {
SpecParseError {
input,
context: None,
kind: Some(SpecErrorKind::UrlParseError(e)),
}
}
}
impl<'a> FromExternalError<&'a str, PackageSpecError> for SpecParseError<&'a str> {
fn from_external_error(input: &'a str, _kind: ErrorKind, e: PackageSpecError) -> Self {
SpecParseError {
input,
context: None,
kind: Some(SpecErrorKind::GitHostParseError(Box::new(e))),
}
}
}
| 31 | 113 | 0.611249 |
fb147413bdd8fa29a13fa4a0ba3f92df8f361655 | 4,698 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::writer::{ArithmeticArrayProperty, ArrayProperty, Inner, InnerValueType, InspectType};
use tracing::error;
#[cfg(test)]
use {inspect_format::Block, mapped_vmo::Mapping, std::sync::Arc};
/// Inspect double array data type.
///
/// NOTE: do not rely on the `PartialEq` implementation for true comparison;
/// use the reader instead.
///
/// NOTE: Operations on a Default value are no-ops.
#[derive(Debug, PartialEq, Eq, Default)]
pub struct DoubleArrayProperty {
pub(crate) inner: Inner<InnerValueType>,
}
impl InspectType for DoubleArrayProperty {}
crate::impl_inspect_type_internal!(DoubleArrayProperty);
impl ArrayProperty for DoubleArrayProperty {
type Type = f64;
fn set(&self, index: usize, value: f64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|mut state| {
state.set_array_double_slot(inner_ref.block_index, index, value)
})
.unwrap_or_else(|err| {
error!(?err, "Failed to set property");
});
}
}
fn clear(&self) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|mut state| state.clear_array(inner_ref.block_index, 0))
.unwrap_or_else(|err| {
error!(?err, "Failed to clear property.");
});
}
}
}
impl ArithmeticArrayProperty for DoubleArrayProperty {
fn add(&self, index: usize, value: f64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|mut state| {
state.add_array_double_slot(inner_ref.block_index, index, value)
})
.unwrap_or_else(|err| {
error!(?err, "Failed to add property");
});
}
}
fn subtract(&self, index: usize, value: f64) {
if let Some(ref inner_ref) = self.inner.inner_ref() {
inner_ref
.state
.try_lock()
.and_then(|mut state| {
state.subtract_array_double_slot(inner_ref.block_index, index, value)
})
.unwrap_or_else(|err| {
error!(?err, "Failed to subtract property");
});
}
}
}
#[cfg(test)]
impl DoubleArrayProperty {
/// Returns the [`Block`][Block] associated with this value.
pub fn get_block(&self) -> Option<Block<Arc<Mapping>>> {
self.inner.inner_ref().and_then(|inner_ref| {
inner_ref
.state
.try_lock()
.and_then(|state| state.heap().get_block(inner_ref.block_index))
.ok()
})
}
/// Returns the index of the value's block in the VMO.
pub fn block_index(&self) -> u32 {
self.inner.inner_ref().unwrap().block_index
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::writer::Inspector;
#[fuchsia::test]
fn test_double_array() {
// Create and use a default value.
let default = DoubleArrayProperty::default();
default.add(1, 1.0);
let inspector = Inspector::new();
let root = inspector.root();
let node = root.create_child("node");
let node_block = node.get_block().unwrap();
{
let array = node.create_double_array("array_property", 5);
let array_block = array.get_block().unwrap();
array.set(0, 5.0);
assert_eq!(array_block.array_get_double_slot(0).unwrap(), 5.0);
array.add(0, 5.3);
assert_eq!(array_block.array_get_double_slot(0).unwrap(), 10.3);
array.subtract(0, 3.4);
assert_eq!(array_block.array_get_double_slot(0).unwrap(), 6.9);
array.set(1, 2.5);
array.set(3, -3.1);
for (i, value) in [6.9, 2.5, 0.0, -3.1, 0.0].iter().enumerate() {
assert_eq!(array_block.array_get_double_slot(i).unwrap(), *value);
}
array.clear();
for i in 0..5 {
assert_eq!(0.0, array_block.array_get_double_slot(i).unwrap());
}
assert_eq!(node_block.child_count().unwrap(), 1);
}
assert_eq!(node_block.child_count().unwrap(), 0);
}
}
| 31.32 | 96 | 0.549596 |
ffc7f58ed7a982e1677680a9a7fb742b48ec0145 | 4,859 | extern crate binance;
use binance::api::*;
use binance::userstream::*;
use binance::websockets::*;
use binance::model::{AccountUpdateEvent, KlineEvent, OrderTradeEvent,
TradesEvent, DayTickerEvent, OrderBook, DepthOrderBookEvent};
fn main() {
user_stream();
user_stream_websocket();
market_websocket();
kline_websocket();
all_trades_websocket();
}
fn user_stream() {
let api_key_user = Some("YOUR_API_KEY".into());
let user_stream: UserStream = Binance::new(api_key_user.clone(), None);
if let Ok(answer) = user_stream.start() {
println!("Data Stream Started ...");
let listen_key = answer.listen_key;
match user_stream.keep_alive(&listen_key) {
Ok(msg) => println!("Keepalive user data stream: {:?}", msg),
Err(e) => println!("Error: {}", e),
}
match user_stream.close(&listen_key) {
Ok(msg) => println!("Close user data stream: {:?}", msg),
Err(e) => println!("Error: {}", e),
}
} else {
println!("Not able to start an User Stream (Check your API_KEY)");
}
}
fn user_stream_websocket() {
struct WebSocketHandler;
impl UserStreamEventHandler for WebSocketHandler {
fn account_update_handler(&mut self, event: &AccountUpdateEvent) {
for balance in &event.balance {
println!(
"Asset: {}, free: {}, locked: {}",
balance.asset, balance.free, balance.locked
);
}
}
fn order_trade_handler(&mut self, event: &OrderTradeEvent) {
println!(
"Symbol: {}, Side: {}, Price: {}, Execution Type: {}",
event.symbol, event.side, event.price, event.execution_type
);
}
}
let api_key_user = Some("YOUR_KEY".into());
let user_stream: UserStream = Binance::new(api_key_user, None);
if let Ok(answer) = user_stream.start() {
let listen_key = answer.listen_key;
let mut handler = WebSocketHandler {};
let mut web_socket: WebSockets = WebSockets::new();
web_socket.add_user_stream_handler(&mut handler);
web_socket.connect(&listen_key).unwrap(); // check error
web_socket.event_loop();
} else {
println!("Not able to start an User Stream (Check your API_KEY)");
}
}
fn market_websocket() {
struct WebSocketHandler;
impl MarketEventHandler for WebSocketHandler {
fn aggregated_trades_handler(&mut self, event: &TradesEvent) {
println!(
"Symbol: {}, price: {}, qty: {}",
event.symbol, event.price, event.qty
);
}
fn depth_orderbook_handler(&mut self, event: &DepthOrderBookEvent) {
println!(
"Symbol: {}, Bids: {:?}, Ask: {:?}",
event.symbol, event.bids, event.asks
);
}
fn partial_orderbook_handler(&mut self, order_book: &OrderBook) {
println!(
"last_update_id: {}, Bids: {:?}, Ask: {:?}",
order_book.last_update_id, order_book.bids, order_book.asks
);
}
}
let agg_trade: String = format!("{}@aggTrade", "ethbtc");
let mut handler = WebSocketHandler {};
let mut web_socket: WebSockets = WebSockets::new();
web_socket.add_market_handler(&mut handler);
web_socket.connect(&agg_trade).unwrap(); // check error
web_socket.event_loop();
}
fn all_trades_websocket() {
struct WebSocketHandler;
impl DayTickerEventHandler for WebSocketHandler {
fn day_ticker_handler(&mut self, events: &[DayTickerEvent]) {
for event in events {
println!(
"Symbol: {}, price: {}, qty: {}",
event.symbol, event.best_bid, event.best_bid_qty
);
}
}
}
let agg_trade: String = format!("!ticker@arr");
let mut handler = WebSocketHandler {};
let mut web_socket: WebSockets = WebSockets::new();
web_socket.add_day_ticker_handler(&mut handler);
web_socket.connect(&agg_trade).unwrap(); // check error
web_socket.event_loop();
}
fn kline_websocket() {
struct WebSocketHandler;
impl KlineEventHandler for WebSocketHandler {
fn kline_handler(&mut self, event: &KlineEvent) {
println!(
"Symbol: {}, high: {}, low: {}",
event.kline.symbol, event.kline.low, event.kline.high
);
}
}
    let kline: String = "ethbtc@kline_1m".to_string();
let mut handler = WebSocketHandler {};
let mut web_socket: WebSockets = WebSockets::new();
web_socket.add_kline_handler(&mut handler);
web_socket.connect(&kline).unwrap(); // check error
web_socket.event_loop();
}
| 31.348387 | 82 | 0.581395 |
b97c08b5bded677198cf124bc211d11f9d97937d | 37,465 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Reduced graph building
//!
//! Here we build the "reduced graph": the graph of the module tree without
//! any imports resolved.
use macros::{InvocationData, LegacyScope};
use resolve_imports::ImportDirective;
use resolve_imports::ImportDirectiveSubclass::{self, GlobImport, SingleImport};
use {Module, ModuleData, ModuleKind, NameBinding, NameBindingKind, ToNameBinding};
use {Resolver, ResolverArenas};
use Namespace::{self, TypeNS, ValueNS, MacroNS};
use {resolve_error, resolve_struct_error, ResolutionError};
use rustc::middle::cstore::LoadedMacro;
use rustc::hir::def::*;
use rustc::hir::def_id::{BUILTIN_MACROS_CRATE, CRATE_DEF_INDEX, LOCAL_CRATE, DefId};
use rustc::ty;
use std::cell::Cell;
use std::rc::Rc;
use syntax::ast::{Name, Ident};
use syntax::attr;
use syntax::ast::{self, Block, ForeignItem, ForeignItemKind, Item, ItemKind};
use syntax::ast::{Mutability, StmtKind, TraitItem, TraitItemKind};
use syntax::ast::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple};
use syntax::ext::base::SyntaxExtension;
use syntax::ext::base::Determinacy::Undetermined;
use syntax::ext::hygiene::Mark;
use syntax::ext::tt::macro_rules;
use syntax::parse::token;
use syntax::symbol::keywords;
use syntax::visit::{self, Visitor};
use syntax_pos::{Span, DUMMY_SP};
impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, Mark) {
fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Module(self.0),
vis: self.1,
span: self.2,
expansion: self.3,
})
}
}
impl<'a> ToNameBinding<'a> for (Def, ty::Visibility, Span, Mark) {
fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Def(self.0),
vis: self.1,
span: self.2,
expansion: self.3,
})
}
}
#[derive(Default, PartialEq, Eq)]
struct LegacyMacroImports {
import_all: Option<Span>,
imports: Vec<(Name, Span)>,
reexports: Vec<(Name, Span)>,
}
impl<'a> Resolver<'a> {
    /// Defines `ident` in namespace `ns` of module `parent` to be `def` if it is not yet
    /// defined; otherwise, reports an error.
pub fn define<T>(&mut self, parent: Module<'a>, ident: Ident, ns: Namespace, def: T)
where T: ToNameBinding<'a>,
{
let binding = def.to_name_binding(self.arenas);
if let Err(old_binding) = self.try_define(parent, ident, ns, binding) {
self.report_conflict(parent, ident, ns, old_binding, &binding);
}
}
fn block_needs_anonymous_module(&mut self, block: &Block) -> bool {
// If any statements are items, we need to create an anonymous module
block.stmts.iter().any(|statement| match statement.node {
StmtKind::Item(_) | StmtKind::Mac(_) => true,
_ => false,
})
}
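    // E.g. `{ fn helper() {} helper() }` contains an item statement and gets
    // its own anonymous module, while `{ 1 + 1 }` does not.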
fn insert_field_names(&mut self, def_id: DefId, field_names: Vec<Name>) {
if !field_names.is_empty() {
self.field_names.insert(def_id, field_names);
}
}
/// Constructs the reduced graph for one item.
fn build_reduced_graph_for_item(&mut self, item: &Item, expansion: Mark) {
let parent = self.current_module;
let ident = item.ident;
let sp = item.span;
let vis = self.resolve_visibility(&item.vis);
match item.node {
ItemKind::Use(ref view_path) => {
// Extract and intern the module part of the path. For
// globs and lists, the path is found directly in the AST;
// for simple paths we have to munge the path a little.
let mut module_path: Vec<_> = match view_path.node {
ViewPathSimple(_, ref full_path) => {
full_path.segments
.split_last()
.unwrap()
.1
.iter()
.map(|seg| seg.identifier)
.collect()
}
ViewPathGlob(ref module_ident_path) |
ViewPathList(ref module_ident_path, _) => {
module_ident_path.segments
.iter()
.map(|seg| seg.identifier)
.collect()
}
};
// This can be removed once warning cycle #36888 is complete.
if module_path.len() >= 2 && module_path[0].name == keywords::CrateRoot.name() &&
token::Ident(module_path[1]).is_path_segment_keyword() {
module_path.remove(0);
}
// Build up the import directives.
let is_prelude = attr::contains_name(&item.attrs, "prelude_import");
match view_path.node {
ViewPathSimple(mut binding, ref full_path) => {
let mut source = full_path.segments.last().unwrap().identifier;
let source_name = source.name;
if source_name == "mod" || source_name == "self" {
resolve_error(self,
view_path.span,
ResolutionError::SelfImportsOnlyAllowedWithin);
} else if source_name == "$crate" && full_path.segments.len() == 1 {
let crate_root = self.resolve_crate_root(source.ctxt);
let crate_name = match crate_root.kind {
ModuleKind::Def(_, name) => name,
ModuleKind::Block(..) => unreachable!(),
};
source.name = crate_name;
if binding.name == "$crate" {
binding.name = crate_name;
}
self.session.struct_span_warn(item.span, "`$crate` may not be imported")
.note("`use $crate;` was erroneously allowed and \
will become a hard error in a future release")
.emit();
}
let subclass = SingleImport {
target: binding,
source: source,
result: self.per_ns(|_, _| Cell::new(Err(Undetermined))),
type_ns_only: false,
};
self.add_import_directive(
module_path, subclass, view_path.span, item.id, vis, expansion,
);
}
ViewPathList(_, ref source_items) => {
// Make sure there's at most one `mod` import in the list.
let mod_spans = source_items.iter().filter_map(|item| {
if item.node.name.name == keywords::SelfValue.name() {
Some(item.span)
} else {
None
}
}).collect::<Vec<Span>>();
if mod_spans.len() > 1 {
let mut e = resolve_struct_error(self,
mod_spans[0],
ResolutionError::SelfImportCanOnlyAppearOnceInTheList);
for other_span in mod_spans.iter().skip(1) {
e.span_note(*other_span, "another `self` import appears here");
}
e.emit();
}
for source_item in source_items {
let node = source_item.node;
let (module_path, ident, rename, type_ns_only) = {
if node.name.name != keywords::SelfValue.name() {
let rename = node.rename.unwrap_or(node.name);
(module_path.clone(), node.name, rename, false)
} else {
let ident = *module_path.last().unwrap();
if ident.name == keywords::CrateRoot.name() {
resolve_error(
self,
source_item.span,
ResolutionError::
SelfImportOnlyInImportListWithNonEmptyPrefix
);
continue;
}
let module_path = module_path.split_last().unwrap().1;
let rename = node.rename.unwrap_or(ident);
(module_path.to_vec(), ident, rename, true)
}
};
let subclass = SingleImport {
target: rename,
source: ident,
result: self.per_ns(|_, _| Cell::new(Err(Undetermined))),
type_ns_only: type_ns_only,
};
let id = source_item.node.id;
self.add_import_directive(
module_path, subclass, source_item.span, id, vis, expansion,
);
}
}
ViewPathGlob(_) => {
let subclass = GlobImport {
is_prelude: is_prelude,
max_vis: Cell::new(ty::Visibility::Invisible),
};
self.add_import_directive(
module_path, subclass, view_path.span, item.id, vis, expansion,
);
}
}
}
ItemKind::ExternCrate(_) => {
self.crate_loader.process_item(item, &self.definitions);
// n.b. we don't need to look at the path option here, because cstore already did
let crate_id = self.session.cstore.extern_mod_stmt_cnum(item.id).unwrap();
let module =
self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX });
self.populate_module_if_necessary(module);
let used = self.process_legacy_macro_imports(item, module, expansion);
let binding =
(module, ty::Visibility::Public, sp, expansion).to_name_binding(self.arenas);
let directive = self.arenas.alloc_import_directive(ImportDirective {
id: item.id,
parent: parent,
imported_module: Cell::new(Some(module)),
subclass: ImportDirectiveSubclass::ExternCrate,
span: item.span,
module_path: Vec::new(),
vis: Cell::new(vis),
expansion: expansion,
used: Cell::new(used),
});
self.potentially_unused_imports.push(directive);
let imported_binding = self.import(binding, directive);
self.define(parent, ident, TypeNS, imported_binding);
}
ItemKind::GlobalAsm(..) => {}
ItemKind::Mod(..) if item.ident == keywords::Invalid.ident() => {} // Crate root
ItemKind::Mod(..) => {
let def_id = self.definitions.local_def_id(item.id);
let module_kind = ModuleKind::Def(Def::Mod(def_id), ident.name);
let module = self.arenas.alloc_module(ModuleData {
no_implicit_prelude: parent.no_implicit_prelude || {
attr::contains_name(&item.attrs, "no_implicit_prelude")
},
..ModuleData::new(Some(parent), module_kind, def_id, expansion, item.span)
});
self.define(parent, ident, TypeNS, (module, vis, sp, expansion));
self.module_map.insert(def_id, module);
// Descend into the module.
self.current_module = module;
}
ItemKind::ForeignMod(..) => self.crate_loader.process_item(item, &self.definitions),
// These items live in the value namespace.
ItemKind::Static(_, m, _) => {
let mutbl = m == Mutability::Mutable;
let def = Def::Static(self.definitions.local_def_id(item.id), mutbl);
self.define(parent, ident, ValueNS, (def, vis, sp, expansion));
}
ItemKind::Const(..) => {
let def = Def::Const(self.definitions.local_def_id(item.id));
self.define(parent, ident, ValueNS, (def, vis, sp, expansion));
}
ItemKind::Fn(..) => {
let def = Def::Fn(self.definitions.local_def_id(item.id));
self.define(parent, ident, ValueNS, (def, vis, sp, expansion));
}
// These items live in the type namespace.
ItemKind::Ty(..) => {
let def = Def::TyAlias(self.definitions.local_def_id(item.id));
self.define(parent, ident, TypeNS, (def, vis, sp, expansion));
}
ItemKind::Enum(ref enum_definition, _) => {
let def = Def::Enum(self.definitions.local_def_id(item.id));
let module_kind = ModuleKind::Def(def, ident.name);
let module = self.new_module(parent,
module_kind,
parent.normal_ancestor_id,
expansion,
item.span);
self.define(parent, ident, TypeNS, (module, vis, sp, expansion));
for variant in &(*enum_definition).variants {
self.build_reduced_graph_for_variant(variant, module, vis, expansion);
}
}
// These items live in both the type and value namespaces.
ItemKind::Struct(ref struct_def, _) => {
// Define a name in the type namespace.
let def = Def::Struct(self.definitions.local_def_id(item.id));
self.define(parent, ident, TypeNS, (def, vis, sp, expansion));
// Record field names for error reporting.
let mut ctor_vis = vis;
let field_names = struct_def.fields().iter().filter_map(|field| {
let field_vis = self.resolve_visibility(&field.vis);
if ctor_vis.is_at_least(field_vis, &*self) {
ctor_vis = field_vis;
}
field.ident.map(|ident| ident.name)
}).collect();
let item_def_id = self.definitions.local_def_id(item.id);
self.insert_field_names(item_def_id, field_names);
// If this is a tuple or unit struct, define a name
// in the value namespace as well.
if !struct_def.is_struct() {
let ctor_def = Def::StructCtor(self.definitions.local_def_id(struct_def.id()),
CtorKind::from_ast(struct_def));
self.define(parent, ident, ValueNS, (ctor_def, ctor_vis, sp, expansion));
self.struct_constructors.insert(def.def_id(), (ctor_def, ctor_vis));
}
}
ItemKind::Union(ref vdata, _) => {
let def = Def::Union(self.definitions.local_def_id(item.id));
self.define(parent, ident, TypeNS, (def, vis, sp, expansion));
// Record field names for error reporting.
let field_names = vdata.fields().iter().filter_map(|field| {
self.resolve_visibility(&field.vis);
field.ident.map(|ident| ident.name)
}).collect();
let item_def_id = self.definitions.local_def_id(item.id);
self.insert_field_names(item_def_id, field_names);
}
ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => {}
ItemKind::Trait(..) => {
let def_id = self.definitions.local_def_id(item.id);
// Add all the items within to a new module.
let module_kind = ModuleKind::Def(Def::Trait(def_id), ident.name);
let module = self.new_module(parent,
module_kind,
parent.normal_ancestor_id,
expansion,
item.span);
self.define(parent, ident, TypeNS, (module, vis, sp, expansion));
self.current_module = module;
}
ItemKind::MacroDef(..) | ItemKind::Mac(_) => unreachable!(),
}
}
// Constructs the reduced graph for one variant. Variants exist in the
// type and value namespaces.
fn build_reduced_graph_for_variant(&mut self,
variant: &Variant,
parent: Module<'a>,
vis: ty::Visibility,
expansion: Mark) {
let ident = variant.node.name;
let def_id = self.definitions.local_def_id(variant.node.data.id());
// Define a name in the type namespace.
let def = Def::Variant(def_id);
self.define(parent, ident, TypeNS, (def, vis, variant.span, expansion));
// Define a constructor name in the value namespace.
        // Braced variants, unlike structs, generate unusable names in the
        // value namespace; they are reserved for possible future use.
let ctor_kind = CtorKind::from_ast(&variant.node.data);
let ctor_def = Def::VariantCtor(def_id, ctor_kind);
self.define(parent, ident, ValueNS, (ctor_def, vis, variant.span, expansion));
}
/// Constructs the reduced graph for one foreign item.
fn build_reduced_graph_for_foreign_item(&mut self, item: &ForeignItem, expansion: Mark) {
let def = match item.node {
ForeignItemKind::Fn(..) => {
Def::Fn(self.definitions.local_def_id(item.id))
}
ForeignItemKind::Static(_, m) => {
Def::Static(self.definitions.local_def_id(item.id), m)
}
};
let parent = self.current_module;
let vis = self.resolve_visibility(&item.vis);
self.define(parent, item.ident, ValueNS, (def, vis, item.span, expansion));
}
fn build_reduced_graph_for_block(&mut self, block: &Block, expansion: Mark) {
let parent = self.current_module;
if self.block_needs_anonymous_module(block) {
let module = self.new_module(parent,
ModuleKind::Block(block.id),
parent.normal_ancestor_id,
expansion,
block.span);
self.block_map.insert(block.id, module);
self.current_module = module; // Descend into the block.
}
}
/// Builds the reduced graph for a single item in an external crate.
fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'a>, child: Export) {
let ident = child.ident;
let def = child.def;
let def_id = def.def_id();
let vis = self.session.cstore.visibility(def_id);
let span = child.span;
let expansion = Mark::root(); // FIXME(jseyfried) intercrate hygiene
match def {
Def::Mod(..) | Def::Enum(..) => {
let module = self.new_module(parent,
ModuleKind::Def(def, ident.name),
def_id,
expansion,
span);
self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion));
}
Def::Variant(..) | Def::TyAlias(..) => {
self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion));
}
Def::Fn(..) | Def::Static(..) | Def::Const(..) | Def::VariantCtor(..) => {
self.define(parent, ident, ValueNS, (def, vis, DUMMY_SP, expansion));
}
Def::StructCtor(..) => {
self.define(parent, ident, ValueNS, (def, vis, DUMMY_SP, expansion));
if let Some(struct_def_id) =
self.session.cstore.def_key(def_id).parent
.map(|index| DefId { krate: def_id.krate, index: index }) {
self.struct_constructors.insert(struct_def_id, (def, vis));
}
}
Def::Trait(..) => {
let module_kind = ModuleKind::Def(def, ident.name);
let module = self.new_module(parent,
module_kind,
parent.normal_ancestor_id,
expansion,
span);
self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion));
for child in self.session.cstore.item_children(def_id, self.session) {
let ns = if let Def::AssociatedTy(..) = child.def { TypeNS } else { ValueNS };
self.define(module, child.ident, ns,
(child.def, ty::Visibility::Public, DUMMY_SP, expansion));
if self.session.cstore.associated_item_cloned(child.def.def_id())
.method_has_self_argument {
self.has_self.insert(child.def.def_id());
}
}
module.populated.set(true);
}
Def::Struct(..) | Def::Union(..) => {
self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion));
// Record field names for error reporting.
let field_names = self.session.cstore.struct_field_names(def_id);
self.insert_field_names(def_id, field_names);
}
Def::Macro(..) => {
self.define(parent, ident, MacroNS, (def, vis, DUMMY_SP, expansion));
}
_ => bug!("unexpected definition: {:?}", def)
}
}
pub fn get_module(&mut self, def_id: DefId) -> Module<'a> {
if def_id.krate == LOCAL_CRATE {
return self.module_map[&def_id]
}
let macros_only = self.session.cstore.dep_kind(def_id.krate).macros_only();
if let Some(&module) = self.extern_module_map.get(&(def_id, macros_only)) {
return module;
}
let (name, parent) = if def_id.index == CRATE_DEF_INDEX {
(self.session.cstore.crate_name(def_id.krate), None)
} else {
let def_key = self.session.cstore.def_key(def_id);
(def_key.disambiguated_data.data.get_opt_name().unwrap(),
Some(self.get_module(DefId { index: def_key.parent.unwrap(), ..def_id })))
};
let kind = ModuleKind::Def(Def::Mod(def_id), name);
let module =
self.arenas.alloc_module(ModuleData::new(parent, kind, def_id, Mark::root(), DUMMY_SP));
self.extern_module_map.insert((def_id, macros_only), module);
module
}
pub fn macro_def_scope(&mut self, expansion: Mark) -> Module<'a> {
let def_id = self.macro_defs[&expansion];
if let Some(id) = self.definitions.as_local_node_id(def_id) {
self.local_macro_def_scopes[&id]
} else if def_id.krate == BUILTIN_MACROS_CRATE {
            // FIXME(jseyfried): This happens when `include!()`ing a `$crate::` path, cf. #40469.
self.graph_root
} else {
let module_def_id = ty::DefIdTree::parent(&*self, def_id).unwrap();
self.get_module(module_def_id)
}
}
pub fn get_macro(&mut self, def: Def) -> Rc<SyntaxExtension> {
let def_id = match def {
Def::Macro(def_id, ..) => def_id,
_ => panic!("Expected Def::Macro(..)"),
};
if let Some(ext) = self.macro_map.get(&def_id) {
return ext.clone();
}
let macro_def = match self.session.cstore.load_macro(def_id, &self.session) {
LoadedMacro::MacroDef(macro_def) => macro_def,
LoadedMacro::ProcMacro(ext) => return ext,
};
let ext = Rc::new(macro_rules::compile(&self.session.parse_sess,
&self.session.features,
¯o_def));
self.macro_map.insert(def_id, ext.clone());
ext
}
/// Ensures that the reduced graph rooted at the given external module
/// is built, building it if it is not.
pub fn populate_module_if_necessary(&mut self, module: Module<'a>) {
if module.populated.get() { return }
for child in self.session.cstore.item_children(module.def_id().unwrap(), self.session) {
self.build_reduced_graph_for_external_crate_def(module, child);
}
module.populated.set(true)
}
fn legacy_import_macro(&mut self,
name: Name,
binding: &'a NameBinding<'a>,
span: Span,
allow_shadowing: bool) {
if self.global_macros.insert(name, binding).is_some() && !allow_shadowing {
let msg = format!("`{}` is already in scope", name);
let note =
"macro-expanded `#[macro_use]`s may not shadow existing macros (see RFC 1560)";
self.session.struct_span_err(span, &msg).note(note).emit();
}
}
// This returns true if we should consider the underlying `extern crate` to be used.
fn process_legacy_macro_imports(&mut self, item: &Item, module: Module<'a>, expansion: Mark)
-> bool {
let allow_shadowing = expansion == Mark::root();
let legacy_imports = self.legacy_macro_imports(&item.attrs);
let mut used = legacy_imports != LegacyMacroImports::default();
// `#[macro_use]` and `#[macro_reexport]` are only allowed at the crate root.
if self.current_module.parent.is_some() && used {
span_err!(self.session, item.span, E0468,
"an `extern crate` loading macros must be at the crate root");
} else if !self.use_extern_macros && !used &&
self.session.cstore.dep_kind(module.def_id().unwrap().krate).macros_only() {
let msg = "proc macro crates and `#[no_link]` crates have no effect without \
`#[macro_use]`";
self.session.span_warn(item.span, msg);
used = true; // Avoid the normal unused extern crate warning
}
let (graph_root, arenas) = (self.graph_root, self.arenas);
let macro_use_directive = |span| arenas.alloc_import_directive(ImportDirective {
id: item.id,
parent: graph_root,
imported_module: Cell::new(Some(module)),
subclass: ImportDirectiveSubclass::MacroUse,
span: span,
module_path: Vec::new(),
vis: Cell::new(ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX))),
expansion: expansion,
used: Cell::new(false),
});
if let Some(span) = legacy_imports.import_all {
let directive = macro_use_directive(span);
self.potentially_unused_imports.push(directive);
module.for_each_child(|ident, ns, binding| if ns == MacroNS {
let imported_binding = self.import(binding, directive);
self.legacy_import_macro(ident.name, imported_binding, span, allow_shadowing);
});
} else {
for (name, span) in legacy_imports.imports {
let ident = Ident::with_empty_ctxt(name);
let result = self.resolve_ident_in_module(module, ident, MacroNS,
false, false, span);
if let Ok(binding) = result {
let directive = macro_use_directive(span);
self.potentially_unused_imports.push(directive);
let imported_binding = self.import(binding, directive);
self.legacy_import_macro(name, imported_binding, span, allow_shadowing);
} else {
span_err!(self.session, span, E0469, "imported macro not found");
}
}
}
for (name, span) in legacy_imports.reexports {
self.session.cstore.export_macros(module.def_id().unwrap().krate);
let ident = Ident::with_empty_ctxt(name);
let result = self.resolve_ident_in_module(module, ident, MacroNS, false, false, span);
if let Ok(binding) = result {
self.macro_exports.push(Export { ident: ident, def: binding.def(), span: span });
} else {
span_err!(self.session, span, E0470, "reexported macro not found");
}
}
used
}
// does this attribute list contain "macro_use"?
fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool {
for attr in attrs {
if attr.check_name("macro_escape") {
let msg = "macro_escape is a deprecated synonym for macro_use";
let mut err = self.session.struct_span_warn(attr.span, msg);
if let ast::AttrStyle::Inner = attr.style {
err.help("consider an outer attribute, #[macro_use] mod ...").emit();
} else {
err.emit();
}
} else if !attr.check_name("macro_use") {
continue;
}
if !attr.is_word() {
self.session.span_err(attr.span, "arguments to macro_use are not allowed here");
}
return true;
}
false
}
fn legacy_macro_imports(&mut self, attrs: &[ast::Attribute]) -> LegacyMacroImports {
let mut imports = LegacyMacroImports::default();
for attr in attrs {
if attr.check_name("macro_use") {
match attr.meta_item_list() {
Some(names) => for attr in names {
if let Some(word) = attr.word() {
imports.imports.push((word.name(), attr.span()));
} else {
span_err!(self.session, attr.span(), E0466, "bad macro import");
}
},
None => imports.import_all = Some(attr.span),
}
} else if attr.check_name("macro_reexport") {
let bad_macro_reexport = |this: &mut Self, span| {
span_err!(this.session, span, E0467, "bad macro reexport");
};
if let Some(names) = attr.meta_item_list() {
for attr in names {
if let Some(word) = attr.word() {
imports.reexports.push((word.name(), attr.span()));
} else {
bad_macro_reexport(self, attr.span());
}
}
} else {
bad_macro_reexport(self, attr.span());
}
}
}
imports
}
}
pub struct BuildReducedGraphVisitor<'a, 'b: 'a> {
pub resolver: &'a mut Resolver<'b>,
pub legacy_scope: LegacyScope<'b>,
pub expansion: Mark,
}
impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
fn visit_invoc(&mut self, id: ast::NodeId) -> &'b InvocationData<'b> {
let mark = id.placeholder_to_mark();
self.resolver.current_module.unresolved_invocations.borrow_mut().insert(mark);
let invocation = self.resolver.invocations[&mark];
invocation.module.set(self.resolver.current_module);
invocation.legacy_scope.set(self.legacy_scope);
invocation
}
}
macro_rules! method {
($visit:ident: $ty:ty, $invoc:path, $walk:ident) => {
fn $visit(&mut self, node: &'a $ty) {
if let $invoc(..) = node.node {
self.visit_invoc(node.id);
} else {
visit::$walk(self, node);
}
}
}
}
impl<'a, 'b> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b> {
method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item);
method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr);
method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat);
method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty);
fn visit_item(&mut self, item: &'a Item) {
let macro_use = match item.node {
ItemKind::MacroDef(..) => {
self.resolver.define_macro(item, self.expansion, &mut self.legacy_scope);
return
}
ItemKind::Mac(..) => {
self.legacy_scope = LegacyScope::Expansion(self.visit_invoc(item.id));
return
}
ItemKind::Mod(..) => self.resolver.contains_macro_use(&item.attrs),
_ => false,
};
let (parent, legacy_scope) = (self.resolver.current_module, self.legacy_scope);
self.resolver.build_reduced_graph_for_item(item, self.expansion);
visit::walk_item(self, item);
self.resolver.current_module = parent;
if !macro_use {
self.legacy_scope = legacy_scope;
}
}
fn visit_stmt(&mut self, stmt: &'a ast::Stmt) {
if let ast::StmtKind::Mac(..) = stmt.node {
self.legacy_scope = LegacyScope::Expansion(self.visit_invoc(stmt.id));
} else {
visit::walk_stmt(self, stmt);
}
}
fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) {
self.resolver.build_reduced_graph_for_foreign_item(foreign_item, self.expansion);
visit::walk_foreign_item(self, foreign_item);
}
fn visit_block(&mut self, block: &'a Block) {
let (parent, legacy_scope) = (self.resolver.current_module, self.legacy_scope);
self.resolver.build_reduced_graph_for_block(block, self.expansion);
visit::walk_block(self, block);
self.resolver.current_module = parent;
self.legacy_scope = legacy_scope;
}
fn visit_trait_item(&mut self, item: &'a TraitItem) {
let parent = self.resolver.current_module;
if let TraitItemKind::Macro(_) = item.node {
self.visit_invoc(item.id);
return
}
// Add the item to the trait info.
let item_def_id = self.resolver.definitions.local_def_id(item.id);
let (def, ns) = match item.node {
TraitItemKind::Const(..) => (Def::AssociatedConst(item_def_id), ValueNS),
TraitItemKind::Method(ref sig, _) => {
if sig.decl.has_self() {
self.resolver.has_self.insert(item_def_id);
}
(Def::Method(item_def_id), ValueNS)
}
TraitItemKind::Type(..) => (Def::AssociatedTy(item_def_id), TypeNS),
TraitItemKind::Macro(_) => bug!(), // handled above
};
let vis = ty::Visibility::Public;
self.resolver.define(parent, item.ident, ns, (def, vis, item.span, self.expansion));
self.resolver.current_module = parent.parent.unwrap(); // nearest normal ancestor
visit::walk_trait_item(self, item);
self.resolver.current_module = parent;
}
}
| 45.467233 | 100 | 0.514213 |
16c018195e8701b559b84836cfd742cb03f3a466 | 4,652 | mod types;
use std::rc::Rc;
use deno_doc::{parser::DocFileLoader, DocError, DocParser};
use futures::future::LocalBoxFuture;
use js_sys::{Array, Promise};
use swc_ecmascript::parser::{EsConfig, Syntax, TsConfig};
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(typescript_type = "FileLoader")]
pub type FileLoader;
#[wasm_bindgen(method, catch, js_name = "resolve")]
pub fn resolve_js(
this: &FileLoader,
specifier: &str,
referrer: &str,
) -> Result<String, JsValue>;
#[wasm_bindgen(method, catch, js_name = "loadSourceCode")]
pub async fn load_source_code_js(
this: &FileLoader,
specifier: &str,
) -> Result<JsValue, JsValue>;
}
impl DocFileLoader for FileLoader {
fn resolve(&self, specifier: &str, referrer: &str) -> Result<String, deno_doc::DocError> {
self.resolve_js(specifier, referrer).map_err(|e: JsValue| {
let error = js_sys::Error::from(e);
DocError::Resolve(error.message().into())
})
}
fn load_source_code(
&self,
specifier: &str,
) -> LocalBoxFuture<Result<(Syntax, String), deno_doc::DocError>> {
let specifier = specifier.to_string();
Box::pin(async move {
let tuple: JsValue =
self.load_source_code_js(&specifier)
.await
.map_err(|e: JsValue| {
let error = js_sys::Error::from(e);
DocError::Resolve(error.message().into())
})?;
let tuple = Array::from(&tuple);
let syntax: Syntax = tuple
.get(0)
.into_serde()
.expect("invalid layout of Syntax, use typescript to make this a compile error");
let code = tuple
.get(1)
.as_string()
.expect("expected code string, use typescript to make this a compile error");
Ok((syntax, code))
})
}
}
#[wasm_bindgen]
pub struct Parser {
inner: Rc<DocParser>,
}
#[wasm_bindgen]
impl Parser {
#[wasm_bindgen(constructor)]
pub fn new(file_loader: FileLoader, private_items: bool) -> Self {
// TODO: Move
console_error_panic_hook::set_once();
Self {
inner: Rc::new(DocParser::new(Box::new(file_loader), private_items)),
}
}
#[wasm_bindgen(method, js_name = "parseModule")]
pub fn parse_module(
&self,
file_name: String,
syntax: JsValue,
source_code: String,
) -> Result<JsValue, JsValue> {
let syntax: Syntax = syntax.into_serde().expect("invalid syntax");
self.inner
.parse_module(&file_name, syntax, &source_code)
.map(|module_doc| JsValue::from_serde(&module_doc).unwrap())
.map_err(|e| JsValue::from(e.to_string()))
}
#[wasm_bindgen(method)]
pub fn parse(&self, file_name: String) -> Promise {
let parser = self.inner.clone();
wasm_bindgen_futures::future_to_promise(async move {
let doc_nodes = parser
.parse(&file_name)
.await
.map_err(|e| JsValue::from(e.to_string()))?;
JsValue::from_serde(&doc_nodes).map_err(|e| JsValue::from(e.to_string()))
})
}
#[wasm_bindgen(method, js_name = "parseSource")]
pub fn parse_source(
&self,
file_name: String,
syntax: JsValue,
source_code: String,
) -> Result<JsValue, JsValue> {
let syntax: Syntax = syntax.into_serde().expect("invalid syntax");
self.inner
.parse_source(&file_name, syntax, &source_code)
.map(|doc_nodes| JsValue::from_serde(&doc_nodes).unwrap())
.map_err(|e| JsValue::from(e.to_string()))
}
#[wasm_bindgen(method, js_name = "parseWithReexports")]
pub fn parse_with_reexports(&self, file_name: String) -> Promise {
let parser = self.inner.clone();
wasm_bindgen_futures::future_to_promise(async move {
let doc_nodes = parser
.parse_with_reexports(&file_name)
.await
.map_err(|e| JsValue::from(e.to_string()))?;
JsValue::from_serde(&doc_nodes).map_err(|e| JsValue::from(e.to_string()))
})
}
}
#[wasm_bindgen(js_name = "defaultTsConfig")]
pub fn default_ts_config() -> JsValue {
JsValue::from_serde(&TsConfig::default()).unwrap()
}
#[wasm_bindgen(js_name = "defaultEsConfig")]
pub fn default_es_config() -> JsValue {
JsValue::from_serde(&EsConfig::default()).unwrap()
}
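// Illustrative JS-side usage of the generated bindings (sketch only; the file
// loader object and the module specifier below are hypothetical):
//
//     const parser = new Parser({ resolve, loadSourceCode }, false);
//     const docNodes = await parser.parse("file:///mod.ts");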
| 32.531469 | 97 | 0.582115 |
cc95b3a7056a204dcca968e4b813526a2bca4adc | 6,877 | use crate::{
command::{Command, CommandSystem, Context},
utils::snapshot::SnapshotLocal,
};
use anyhow::{anyhow, Error, Result};
use chrono::{SecondsFormat, Utc};
use std::{
path::{Path, PathBuf},
str::FromStr,
};
use uuid::Uuid;
#[cfg(test)]
mod tests;
pub trait BtrfsCommands {
/// Get subvolumes
///
/// * `context` - context in which to execute the command
///
fn get_subvolumes(&mut self, context: &Context) -> Result<Vec<Subvolume>>;
/// Create a snapshot locally
///
/// The new snapshot will be created at the path `<snapshot_path>/<timestamp in rfc3339 format (UTC)>_<suffix>`.
/// This function executes the command `sudo -nu <user> bash -c "sudo btrfs subvolume snapshot -r \"<subvolume_path>\" \"<snapshot_path>/<timestamp in rfc3339 format (UTC)>_<suffix>\""`.
///
/// * `subvolume_path` - path to the subvolume from which to create the snapshot
/// * `snapshot_path` - base path at which the snapshot should be created
/// * `snapshot_suffix` - suffix of the subvolume
/// * `context` - context in which to execute the command
///
fn create_snapshot(
&mut self,
subvolume_path: &str,
snapshot_path: &str,
snapshot_suffix: &str,
context: &Context,
) -> Result<()>;
/// Delete a snapshot
///
    /// Executes `sudo btrfs subvolume delete "<subvolume>"`.
    /// As a precaution, the subvolumes "home", "/home", "root", and "/" cannot be deleted.
    ///
    /// * `subvolume` - absolute path of the snapshot to be deleted
/// * `context` - context in which to execute the command
///
fn delete_subvolume(&mut self, subvolume: &str, context: &Context) -> Result<()>;
/// Send a local snapshot to a remote host
///
/// * `local_snapshot` - snapshot to be sent
/// * `common_parent` - parent snapshot (must be available on the remote host as well)
/// * `context_local` - context to execute the local commands
/// * `backup_path` - base path to store the snapshot on the remote host
/// * `context_remote` - context to execute the remote commands
///
fn send_snapshot<'a>(
&mut self,
local_snapshot: &SnapshotLocal,
common_parent: Option<&'a SnapshotLocal>,
context_local: &Context,
backup_path: &str,
context_remote: &Context,
) -> Result<()>;
}
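// Illustrative usage of `BtrfsCommands` (sketch only; the `Context` value is
// assumed to be constructed elsewhere and error handling is elided):
//
//     let mut btrfs = Btrfs::default();
//     let subvolumes = btrfs.get_subvolumes(&context)?;
//     btrfs.create_snapshot("/home", "/snapshots", "home", &context)?;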
pub struct Btrfs {
command: Box<dyn Command>,
}
#[derive(Debug, PartialEq, Clone)]
pub struct Subvolume {
pub path: String,
pub uuid: Uuid,
pub parent_uuid: Option<Uuid>,
pub received_uuid: Option<Uuid>,
}
impl Default for Btrfs {
fn default() -> Self {
Btrfs {
command: Box::new(CommandSystem {}),
}
}
}
impl BtrfsCommands for Btrfs {
fn get_subvolumes(&mut self, context: &Context) -> Result<Vec<Subvolume>> {
let output = self
.command
.run("sudo btrfs subvolume list -tupqR --sort=rootid /", context)?;
self.extract_subvolumes(&output)
}
fn create_snapshot(
&mut self,
subvolume_path: &str,
snapshot_path: &str,
snapshot_suffix: &str,
context: &Context,
) -> Result<()> {
let snapshot_path_extension = format!(
"{}_{}",
Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true),
snapshot_suffix
);
let mut snapshot_path = PathBuf::from(&*snapshot_path);
snapshot_path.push(snapshot_path_extension);
let snapshot_path = snapshot_path
.as_path()
.to_str()
.ok_or(anyhow!("could not construct snapshot_path"))?;
self.command.run(
&*format!(
"btrfs subvolume snapshot -r \"{}\" \"{}\"",
subvolume_path, snapshot_path
),
context,
)?;
Ok(())
}
fn delete_subvolume(&mut self, subvolume: &str, context: &Context) -> Result<()> {
let subvolume_path = Path::new(subvolume).canonicalize()?;
let subvolume = subvolume_path
.as_path()
.to_str()
.ok_or(anyhow!("cannot canonicalize subvolume path"))?;
        if ["home", "/home", "root", "/"].contains(&subvolume) {
return Err(anyhow!("subvolume cannot be deleted as its name is on the restricted names list (home, /home, /, root)"));
}
self.command
.run(
&format!("sudo btrfs subvolume delete \"{}\"", subvolume),
context,
)
.map(|_| ())
}
fn send_snapshot(
&mut self,
local_snapshot: &SnapshotLocal,
common_parent: Option<&SnapshotLocal>,
context_local: &Context,
backup_path: &str,
context_remote: &Context,
) -> Result<()> {
let mut parent_arg = String::new();
if let Some(parent_snapshot) = common_parent {
parent_arg = format!("-p \"{}\"", parent_snapshot.path);
}
self.command
.run_piped(&vec![
(
&*format!("sudo btrfs send {} \"{}\"", parent_arg, local_snapshot.path),
context_local,
),
(
&*format!("sudo btrfs receive \"{}\"", backup_path),
context_remote,
),
])
.map(|_| ())
}
}
impl Btrfs {
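    // Parses the output of `btrfs subvolume list -tupqR --sort=rootid /`, which
    // looks roughly like the following (illustrative values; the second line is
    // a separator and is skipped):
    //
    //   ID    gen   parent  top level  parent_uuid  received_uuid  uuid      path
    //   --    ---   ------  ---------  -----------  -------------  ----      ----
    //   256   119   5       5          -            -              11eed410  home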
    fn extract_subvolumes(&self, input: &str) -> Result<Vec<Subvolume>> {
let mut subvolumes: Vec<Subvolume> = Vec::new();
let mut lines = input.split("\n");
if lines
.next()
.ok_or(Error::msg("could not find header line"))?
.split_ascii_whitespace()
.collect::<Vec<&str>>()
!= vec![
"ID",
"gen",
"parent",
"top",
"level",
"parent_uuid",
"received_uuid",
"uuid",
"path",
]
{
            return Err(Error::msg("unexpected header line"));
}
        for line in lines.skip(1) {
let tokens: Vec<&str> = line.split_ascii_whitespace().collect();
if tokens.len() != 8 {
continue;
}
subvolumes.push(Subvolume {
path: format!("/{}", tokens[7]),
uuid: Uuid::from_str(tokens[6])?,
parent_uuid: match Uuid::from_str(tokens[4]) {
Ok(pu) => Some(pu),
Err(_) => None,
},
received_uuid: match Uuid::from_str(tokens[5]) {
Ok(ru) => Some(ru),
Err(_) => None,
},
});
}
Ok(subvolumes)
}
}
| 30.700893 | 190 | 0.528137 |
4a298f6de6b43aee612e9c3ecf674961abfe8c25 | 23,845 | #[cfg(feature = "verbose-errors")]
#[cfg(feature = "std")]
use internal::{Err, IResult};
#[cfg(feature = "verbose-errors")]
use verbose_errors::Context;
#[cfg(feature = "std")]
use std::collections::HashMap;
#[cfg(feature = "alloc")]
use lib::std::string::ToString;
#[cfg(feature = "alloc")]
use lib::std::vec::Vec;
#[cfg(feature = "std")]
pub trait HexDisplay {
/// Converts the value of `self` to a hex dump, returning the owned
/// string.
fn to_hex(&self, chunk_size: usize) -> String;
/// Converts the value of `self` to a hex dump beginning at `from` address, returning the owned
/// string.
fn to_hex_from(&self, chunk_size: usize, from: usize) -> String;
}
#[cfg(feature = "std")]
static CHARS: &'static [u8] = b"0123456789abcdef";
#[cfg(feature = "std")]
impl HexDisplay for [u8] {
#[allow(unused_variables)]
fn to_hex(&self, chunk_size: usize) -> String {
self.to_hex_from(chunk_size, 0)
}
#[allow(unused_variables)]
fn to_hex_from(&self, chunk_size: usize, from: usize) -> String {
let mut v = Vec::with_capacity(self.len() * 3);
let mut i = from;
for chunk in self.chunks(chunk_size) {
let s = format!("{:08x}", i);
for &ch in s.as_bytes().iter() {
v.push(ch);
}
v.push(b'\t');
i += chunk_size;
for &byte in chunk {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
v.push(b' ');
}
if chunk_size > chunk.len() {
        for _ in 0..(chunk_size - chunk.len()) {
v.push(b' ');
v.push(b' ');
v.push(b' ');
}
}
v.push(b'\t');
for &byte in chunk {
if (byte >= 32 && byte <= 126) || byte >= 128 {
v.push(byte);
} else {
v.push(b'.');
}
}
v.push(b'\n');
}
String::from_utf8_lossy(&v[..]).into_owned()
}
}
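// Illustrative output (spacing approximate): `b"abc".to_hex(8)` yields a single
// line of the form "00000000\t61 62 63 ...padding...\tabc\n".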
#[cfg(feature = "std")]
impl HexDisplay for str {
#[allow(unused_variables)]
fn to_hex(&self, chunk_size: usize) -> String {
self.to_hex_from(chunk_size, 0)
}
#[allow(unused_variables)]
fn to_hex_from(&self, chunk_size: usize, from: usize) -> String {
self.as_bytes().to_hex_from(chunk_size, from)
}
}
/// Prints a message if the parser fails
///
/// The message prints the `Error` or `Incomplete`
/// and the parser's calling code
///
/// ```
/// # #[macro_use] extern crate snack;
/// # fn main() {
/// named!(f, dbg!( tag!( "abcd" ) ) );
///
/// let a = &b"efgh"[..];
///
/// // Will print the following message:
/// // Error(Position(0, [101, 102, 103, 104])) at l.5 by ' tag ! ( "abcd" ) '
/// f(a);
/// # }
/// ```
#[macro_export]
macro_rules! dbg (
($i: expr, $submac:ident!( $($args:tt)* )) => (
{
use $crate::lib::std::result::Result::*;
let l = line!();
match $submac!($i, $($args)*) {
Err(e) => {
println!("Err({:?}) at l.{} by ' {} '", e, l, stringify!($submac!($($args)*)));
Err(e)
},
a => a,
}
}
);
($i:expr, $f:ident) => (
dbg!($i, call!($f));
);
);
/// Prints a message and the input if the parser fails
///
/// The message prints the `Error` or `Incomplete`
/// and the parser's calling code.
///
/// It also displays the input in hexdump format
///
/// ```ignore
/// # #[macro_use] extern crate snack;
/// # fn main() {
/// named!(f, dbg_dmp!( tag!( "abcd" ) ) );
///
/// let a = &b"efghijkl"[..];
///
/// // Will print the following message:
/// // Error(Position(0, [101, 102, 103, 104, 105, 106, 107, 108])) at l.5 by ' tag ! ( "abcd" ) '
/// // 00000000 65 66 67 68 69 6a 6b 6c efghijkl
/// f(a);
/// # }
/// ```
#[macro_export]
macro_rules! dbg_dmp (
($i: expr, $submac:ident!( $($args:tt)* )) => (
{
use $crate::HexDisplay;
let l = line!();
match $submac!($i, $($args)*) {
Err(e) => {
println!("Error({:?}) at l.{} by ' {} '\n{}", e, l, stringify!($submac!($($args)*)), $i.to_hex(8));
Err(e)
},
a => a,
}
}
);
($i:expr, $f:ident) => (
dbg_dmp!($i, call!($f));
);
);
#[cfg(feature = "verbose-errors")]
pub fn error_to_list<P: Clone, E: Clone>(e: &Context<P, E>) -> Vec<(P, ErrorKind<E>)> {
match e {
&Context::Code(ref i, ref err) => {
let mut v = Vec::new();
v.push((i.clone(), err.clone()));
return v;
}
&Context::List(ref v) => {
let mut v2 = v.clone();
v2.reverse();
v2
}
}
}
#[cfg(feature = "verbose-errors")]
pub fn compare_error_paths<P: Clone + PartialEq, E: Clone + PartialEq>(e1: &Context<P, E>, e2: &Context<P, E>) -> bool {
error_to_list(e1) == error_to_list(e2)
}
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
use lib::std::hash::Hash;
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
pub fn add_error_pattern<'a, I: Clone + Hash + Eq, O, E: Clone + Hash + Eq>(
h: &mut HashMap<Vec<(I, ErrorKind<E>)>, &'a str>,
res: IResult<I, O, E>,
message: &'a str,
) -> bool {
match res {
Err(Err::Error(e)) | Err(Err::Failure(e)) => {
h.insert(error_to_list(&e), message);
true
}
_ => false,
}
}
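// Computes the half-open byte offset range `(start, end)` of the subslice `s`
// within `input`. `s` must actually point into `input` for the result to be
// meaningful.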
pub fn slice_to_offsets(input: &[u8], s: &[u8]) -> (usize, usize) {
let start = input.as_ptr();
let off1 = s.as_ptr() as usize - start as usize;
let off2 = off1 + s.len();
(off1, off2)
}
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
pub fn prepare_errors<O, E: Clone>(input: &[u8], res: IResult<&[u8], O, E>) -> Option<Vec<(ErrorKind<E>, usize, usize)>> {
if let Err(Err::Error(e)) = res {
let mut v: Vec<(ErrorKind<E>, usize, usize)> = Vec::new();
match e {
Context::Code(p, kind) => {
let (o1, o2) = slice_to_offsets(input, p);
v.push((kind, o1, o2));
}
Context::List(mut l) => {
for (p, kind) in l.drain(..) {
let (o1, o2) = slice_to_offsets(input, p);
v.push((kind, o1, o2));
}
v.reverse()
}
}
v.sort_by(|a, b| a.1.cmp(&b.1));
Some(v)
} else {
None
}
}
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
pub fn print_error<O, E: Clone>(input: &[u8], res: IResult<&[u8], O, E>) {
if let Some(v) = prepare_errors(input, res) {
let colors = generate_colors(&v);
println!("parser codes: {}", print_codes(&colors, &HashMap::new()));
println!("{}", print_offsets(input, 0, &v));
} else {
println!("not an error");
}
}
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
pub fn generate_colors<E>(v: &[(ErrorKind<E>, usize, usize)]) -> HashMap<u32, u8> {
let mut h: HashMap<u32, u8> = HashMap::new();
let mut color = 0;
for &(ref c, _, _) in v.iter() {
h.insert(error_to_u32(c), color + 31);
    color = (color + 1) % 7; // cycle through the 7 ANSI foreground colors
}
h
}
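// Returns the code of the innermost (most tightly nested) error range that
// covers `offset`, if any.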
pub fn code_from_offset<E>(v: &[(ErrorKind<E>, usize, usize)], offset: usize) -> Option<u32> {
let mut acc: Option<(u32, usize, usize)> = None;
for &(ref ek, s, e) in v.iter() {
let c = error_to_u32(ek);
if s <= offset && offset <= e {
if let Some((_, start, end)) = acc {
if start <= s && e <= end {
acc = Some((c, s, e));
}
} else {
acc = Some((c, s, e));
}
}
}
if let Some((code, _, _)) = acc {
return Some(code);
} else {
return None;
}
}
#[cfg(feature = "alloc")]
pub fn reset_color(v: &mut Vec<u8>) {
v.push(0x1B);
v.push(b'[');
  v.push(b'0'); // ASCII '0': the ANSI reset sequence is ESC[0m
v.push(b'm');
}
#[cfg(feature = "alloc")]
pub fn write_color(v: &mut Vec<u8>, color: u8) {
v.push(0x1B);
v.push(b'[');
  v.push(b'1'); // ASCII '1': bold attribute, i.e. ESC[1;<color>m
v.push(b';');
let s = color.to_string();
let bytes = s.as_bytes();
v.extend(bytes.iter().cloned());
v.push(b'm');
}
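// e.g. `write_color(&mut v, 31)` appends the ANSI sequence ESC[1;31m (bold red).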
#[cfg(feature = "std")]
#[cfg_attr(feature = "cargo-clippy", allow(implicit_hasher))]
pub fn print_codes(colors: &HashMap<u32, u8>, names: &HashMap<u32, &str>) -> String {
let mut v = Vec::new();
for (code, &color) in colors {
if let Some(&s) = names.get(code) {
let bytes = s.as_bytes();
write_color(&mut v, color);
v.extend(bytes.iter().cloned());
} else {
let s = code.to_string();
let bytes = s.as_bytes();
write_color(&mut v, color);
v.extend(bytes.iter().cloned());
}
reset_color(&mut v);
v.push(b' ');
}
reset_color(&mut v);
String::from_utf8_lossy(&v[..]).into_owned()
}
#[cfg(feature = "std")]
#[cfg(feature = "verbose-errors")]
pub fn print_offsets<E>(input: &[u8], from: usize, offsets: &[(ErrorKind<E>, usize, usize)]) -> String {
let mut v = Vec::with_capacity(input.len() * 3);
let mut i = from;
let chunk_size = 8;
let mut current_code: Option<u32> = None;
let mut current_code2: Option<u32> = None;
let colors = generate_colors(&offsets);
for chunk in input.chunks(chunk_size) {
let s = format!("{:08x}", i);
for &ch in s.as_bytes().iter() {
v.push(ch);
}
v.push(b'\t');
let mut k = i;
let mut l = i;
for &byte in chunk {
if let Some(code) = code_from_offset(&offsets, k) {
if let Some(current) = current_code {
if current != code {
reset_color(&mut v);
current_code = Some(code);
if let Some(&color) = colors.get(&code) {
write_color(&mut v, color);
}
}
} else {
current_code = Some(code);
if let Some(&color) = colors.get(&code) {
write_color(&mut v, color);
}
}
}
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
v.push(b' ');
k = k + 1;
}
reset_color(&mut v);
if chunk_size > chunk.len() {
for _ in 0..(chunk_size - chunk.len()) {
v.push(b' ');
v.push(b' ');
v.push(b' ');
}
}
v.push(b'\t');
for &byte in chunk {
if let Some(code) = code_from_offset(&offsets, l) {
if let Some(current) = current_code2 {
if current != code {
reset_color(&mut v);
current_code2 = Some(code);
if let Some(&color) = colors.get(&code) {
write_color(&mut v, color);
}
}
} else {
current_code2 = Some(code);
if let Some(&color) = colors.get(&code) {
write_color(&mut v, color);
}
}
}
if (byte >= 32 && byte <= 126) || byte >= 128 {
v.push(byte);
} else {
v.push(b'.');
}
l = l + 1;
}
reset_color(&mut v);
v.push(b'\n');
i = i + chunk_size;
}
String::from_utf8_lossy(&v[..]).into_owned()
}
/// indicates which parser returned an error
#[cfg_attr(rustfmt, rustfmt_skip)]
#[derive(Debug,PartialEq,Eq,Hash,Clone)]
#[allow(deprecated)]
pub enum ErrorKind<E = u32> {
Custom(E),
Tag,
MapRes,
MapOpt,
Alt,
IsNot,
IsA,
SeparatedList,
SeparatedNonEmptyList,
Many0,
Many1,
ManyTill,
Count,
TakeUntilAndConsume,
TakeUntil,
TakeUntilEitherAndConsume,
TakeUntilEither,
LengthValue,
TagClosure,
Alpha,
Digit,
HexDigit,
OctDigit,
AlphaNumeric,
Space,
MultiSpace,
LengthValueFn,
Eof,
ExprOpt,
ExprRes,
CondReduce,
Switch,
TagBits,
OneOf,
NoneOf,
Char,
CrLf,
RegexpMatch,
RegexpMatches,
RegexpFind,
RegexpCapture,
RegexpCaptures,
TakeWhile1,
Complete,
Fix,
Escaped,
EscapedTransform,
#[deprecated(since = "4.0.0", note = "Please use `Tag` instead")]
TagStr,
#[deprecated(since = "4.0.0", note = "Please use `IsNot` instead")]
IsNotStr,
#[deprecated(since = "4.0.0", note = "Please use `IsA` instead")]
IsAStr,
#[deprecated(since = "4.0.0", note = "Please use `TakeWhile1` instead")]
TakeWhile1Str,
NonEmpty,
ManyMN,
#[deprecated(since = "4.0.0", note = "Please use `TakeUntilAndConsume` instead")]
TakeUntilAndConsumeStr,
#[deprecated(since = "4.0.0", note = "Please use `TakeUntil` instead")]
TakeUntilStr,
Not,
Permutation,
Verify,
TakeTill1,
TakeUntilAndConsume1,
TakeWhileMN,
ParseTo,
}
#[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(deprecated)]
pub fn error_to_u32<E>(e: &ErrorKind<E>) -> u32 {
match *e {
ErrorKind::Custom(_) => 0,
ErrorKind::Tag => 1,
ErrorKind::MapRes => 2,
ErrorKind::MapOpt => 3,
ErrorKind::Alt => 4,
ErrorKind::IsNot => 5,
ErrorKind::IsA => 6,
ErrorKind::SeparatedList => 7,
ErrorKind::SeparatedNonEmptyList => 8,
ErrorKind::Many1 => 9,
ErrorKind::Count => 10,
ErrorKind::TakeUntilAndConsume => 11,
ErrorKind::TakeUntil => 12,
ErrorKind::TakeUntilEitherAndConsume => 13,
ErrorKind::TakeUntilEither => 14,
ErrorKind::LengthValue => 15,
ErrorKind::TagClosure => 16,
ErrorKind::Alpha => 17,
ErrorKind::Digit => 18,
ErrorKind::AlphaNumeric => 19,
ErrorKind::Space => 20,
ErrorKind::MultiSpace => 21,
ErrorKind::LengthValueFn => 22,
ErrorKind::Eof => 23,
ErrorKind::ExprOpt => 24,
ErrorKind::ExprRes => 25,
ErrorKind::CondReduce => 26,
ErrorKind::Switch => 27,
ErrorKind::TagBits => 28,
ErrorKind::OneOf => 29,
ErrorKind::NoneOf => 30,
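    // Note: discriminants 31..=39 are currently unassigned.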
ErrorKind::Char => 40,
ErrorKind::CrLf => 41,
ErrorKind::RegexpMatch => 42,
ErrorKind::RegexpMatches => 43,
ErrorKind::RegexpFind => 44,
ErrorKind::RegexpCapture => 45,
ErrorKind::RegexpCaptures => 46,
ErrorKind::TakeWhile1 => 47,
ErrorKind::Complete => 48,
ErrorKind::Fix => 49,
ErrorKind::Escaped => 50,
ErrorKind::EscapedTransform => 51,
ErrorKind::TagStr => 52,
ErrorKind::IsNotStr => 53,
ErrorKind::IsAStr => 54,
ErrorKind::TakeWhile1Str => 55,
ErrorKind::NonEmpty => 56,
ErrorKind::ManyMN => 57,
ErrorKind::TakeUntilAndConsumeStr => 58,
ErrorKind::HexDigit => 59,
ErrorKind::TakeUntilStr => 60,
ErrorKind::OctDigit => 61,
ErrorKind::Many0 => 62,
ErrorKind::Not => 63,
ErrorKind::Permutation => 64,
ErrorKind::ManyTill => 65,
ErrorKind::Verify => 66,
ErrorKind::TakeTill1 => 67,
ErrorKind::TakeUntilAndConsume1 => 68,
ErrorKind::TakeWhileMN => 69,
ErrorKind::ParseTo => 70,
}
}
impl<E> ErrorKind<E> {
#[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(deprecated)]
pub fn description(&self) -> &str {
match *self {
ErrorKind::Custom(_) => "Custom error",
ErrorKind::Tag => "Tag",
ErrorKind::MapRes => "Map on Result",
ErrorKind::MapOpt => "Map on Option",
ErrorKind::Alt => "Alternative",
ErrorKind::IsNot => "IsNot",
ErrorKind::IsA => "IsA",
ErrorKind::SeparatedList => "Separated list",
ErrorKind::SeparatedNonEmptyList => "Separated non empty list",
ErrorKind::Many0 => "Many0",
ErrorKind::Many1 => "Many1",
ErrorKind::Count => "Count",
ErrorKind::TakeUntilAndConsume => "Take until and consume",
ErrorKind::TakeUntil => "Take until",
ErrorKind::TakeUntilEitherAndConsume => "Take until either and consume",
ErrorKind::TakeUntilEither => "Take until either",
ErrorKind::LengthValue => "Length followed by value",
ErrorKind::TagClosure => "Tag closure",
ErrorKind::Alpha => "Alphabetic",
ErrorKind::Digit => "Digit",
ErrorKind::AlphaNumeric => "AlphaNumeric",
ErrorKind::Space => "Space",
ErrorKind::MultiSpace => "Multiple spaces",
ErrorKind::LengthValueFn => "LengthValueFn",
ErrorKind::Eof => "End of file",
ErrorKind::ExprOpt => "Evaluate Option",
ErrorKind::ExprRes => "Evaluate Result",
ErrorKind::CondReduce => "Condition reduce",
ErrorKind::Switch => "Switch",
ErrorKind::TagBits => "Tag on bitstream",
ErrorKind::OneOf => "OneOf",
ErrorKind::NoneOf => "NoneOf",
ErrorKind::Char => "Char",
ErrorKind::CrLf => "CrLf",
ErrorKind::RegexpMatch => "RegexpMatch",
ErrorKind::RegexpMatches => "RegexpMatches",
ErrorKind::RegexpFind => "RegexpFind",
ErrorKind::RegexpCapture => "RegexpCapture",
ErrorKind::RegexpCaptures => "RegexpCaptures",
ErrorKind::TakeWhile1 => "TakeWhile1",
ErrorKind::Complete => "Complete",
ErrorKind::Fix => "Fix",
ErrorKind::Escaped => "Escaped",
ErrorKind::EscapedTransform => "EscapedTransform",
ErrorKind::TagStr => "Tag on strings",
ErrorKind::IsNotStr => "IsNot on strings",
ErrorKind::IsAStr => "IsA on strings",
ErrorKind::TakeWhile1Str => "TakeWhile1 on strings",
ErrorKind::NonEmpty => "NonEmpty",
ErrorKind::ManyMN => "Many(m, n)",
ErrorKind::TakeUntilAndConsumeStr => "Take until and consume on strings",
ErrorKind::HexDigit => "Hexadecimal Digit",
ErrorKind::TakeUntilStr => "Take until on strings",
ErrorKind::OctDigit => "Octal digit",
ErrorKind::Not => "Negation",
ErrorKind::Permutation => "Permutation",
ErrorKind::ManyTill => "ManyTill",
ErrorKind::Verify => "predicate verification",
ErrorKind::TakeTill1 => "TakeTill1",
ErrorKind::TakeUntilAndConsume1 => "Take at least 1 until and consume",
ErrorKind::TakeWhileMN => "TakeWhileMN",
ErrorKind::ParseTo => "Parse string to the specified type",
}
}
/// Convert Err into an ErrorKind.
///
  /// This allows application code to use ErrorKind and stay independent from the `verbose-errors` feature's activation.
pub fn into_error_kind(self) -> ErrorKind<E> {
self
}
}
pub trait Convert<T> {
fn convert(T) -> Self;
}
impl<F, E: From<F>> Convert<ErrorKind<F>> for ErrorKind<E> {
#[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(deprecated)]
fn convert(e: ErrorKind<F>) -> Self {
match e {
ErrorKind::Custom(c) => ErrorKind::Custom(E::from(c)),
ErrorKind::Tag => ErrorKind::Tag,
ErrorKind::MapRes => ErrorKind::MapRes,
ErrorKind::MapOpt => ErrorKind::MapOpt,
ErrorKind::Alt => ErrorKind::Alt,
ErrorKind::IsNot => ErrorKind::IsNot,
ErrorKind::IsA => ErrorKind::IsA,
ErrorKind::SeparatedList => ErrorKind::SeparatedList,
ErrorKind::SeparatedNonEmptyList => ErrorKind::SeparatedNonEmptyList,
ErrorKind::Many1 => ErrorKind::Many1,
ErrorKind::Count => ErrorKind::Count,
ErrorKind::TakeUntilAndConsume => ErrorKind::TakeUntilAndConsume,
ErrorKind::TakeUntil => ErrorKind::TakeUntil,
ErrorKind::TakeUntilEitherAndConsume => ErrorKind::TakeUntilEitherAndConsume,
ErrorKind::TakeUntilEither => ErrorKind::TakeUntilEither,
ErrorKind::LengthValue => ErrorKind::LengthValue,
ErrorKind::TagClosure => ErrorKind::TagClosure,
ErrorKind::Alpha => ErrorKind::Alpha,
ErrorKind::Digit => ErrorKind::Digit,
ErrorKind::AlphaNumeric => ErrorKind::AlphaNumeric,
ErrorKind::Space => ErrorKind::Space,
ErrorKind::MultiSpace => ErrorKind::MultiSpace,
ErrorKind::LengthValueFn => ErrorKind::LengthValueFn,
ErrorKind::Eof => ErrorKind::Eof,
ErrorKind::ExprOpt => ErrorKind::ExprOpt,
ErrorKind::ExprRes => ErrorKind::ExprRes,
ErrorKind::CondReduce => ErrorKind::CondReduce,
ErrorKind::Switch => ErrorKind::Switch,
ErrorKind::TagBits => ErrorKind::TagBits,
ErrorKind::OneOf => ErrorKind::OneOf,
ErrorKind::NoneOf => ErrorKind::NoneOf,
ErrorKind::Char => ErrorKind::Char,
ErrorKind::CrLf => ErrorKind::CrLf,
ErrorKind::RegexpMatch => ErrorKind::RegexpMatch,
ErrorKind::RegexpMatches => ErrorKind::RegexpMatches,
ErrorKind::RegexpFind => ErrorKind::RegexpFind,
ErrorKind::RegexpCapture => ErrorKind::RegexpCapture,
ErrorKind::RegexpCaptures => ErrorKind::RegexpCaptures,
ErrorKind::TakeWhile1 => ErrorKind::TakeWhile1,
ErrorKind::Complete => ErrorKind::Complete,
ErrorKind::Fix => ErrorKind::Fix,
ErrorKind::Escaped => ErrorKind::Escaped,
ErrorKind::EscapedTransform => ErrorKind::EscapedTransform,
ErrorKind::TagStr => ErrorKind::TagStr,
ErrorKind::IsNotStr => ErrorKind::IsNotStr,
ErrorKind::IsAStr => ErrorKind::IsAStr,
ErrorKind::TakeWhile1Str => ErrorKind::TakeWhile1Str,
ErrorKind::NonEmpty => ErrorKind::NonEmpty,
ErrorKind::ManyMN => ErrorKind::ManyMN,
ErrorKind::TakeUntilAndConsumeStr => ErrorKind::TakeUntilAndConsumeStr,
ErrorKind::HexDigit => ErrorKind::HexDigit,
ErrorKind::TakeUntilStr => ErrorKind::TakeUntilStr,
ErrorKind::OctDigit => ErrorKind::OctDigit,
ErrorKind::Many0 => ErrorKind::Many0,
ErrorKind::Not => ErrorKind::Not,
ErrorKind::Permutation => ErrorKind::Permutation,
ErrorKind::ManyTill => ErrorKind::ManyTill,
ErrorKind::Verify => ErrorKind::Verify,
ErrorKind::TakeTill1 => ErrorKind::TakeTill1,
ErrorKind::TakeUntilAndConsume1 => ErrorKind::TakeUntilAndConsume1,
ErrorKind::TakeWhileMN => ErrorKind::TakeWhileMN,
ErrorKind::ParseTo => ErrorKind::ParseTo,
}
}
}
| 33.256625 | 122 | 0.513986 |
6708a48dff598a0430f6447be488bc4192af47ff | 21,333 | /*
* Shared implementation details between server and client
*/
use bytes::BytesMut;
use errors::*;
use futures::{Future, Stream};
use futures::stream::SplitSink;
use protocol::protocol as proto;
use protocol::udp::{SharedUdpHandle, UdpStream};
use protocol::util::{self, Boxable, BoxFuture, HeartbeatAgent, SharedWriter,
SharedSpeedometer, Speedometer, StreamThrottler, ThrottlingHandler};
use websocket::OwnedMessage;
use websocket::async::Client;
use websocket::stream::async::Stream as WsStream;
use std::cell::RefCell;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Instant, Duration};
use tokio_codec::{BytesCodec, Decoder, Framed};
use tokio::net::TcpStream;
use tokio::executor::current_thread;
use tokio_timer;
pub trait TwsServiceState<C: TwsConnection, U: TwsUdpConnection>: 'static + Sized {
fn get_connections(&mut self) -> &mut HashMap<String, C>;
fn get_udp_connections(&mut self) -> &mut HashMap<String, U>;
/*
* The `paused` state of a TwsService session
* indicates whether the underlying WebSocket
* connection has been congested or not.
*/
fn set_paused(&mut self, paused: bool);
fn get_paused(&self) -> bool;
}
macro_rules! make_tws_service_state {
(
$name:ident;
$conn_type:ty, $udp_conn_type:ty;
$conn_field_name:ident, $udp_conn_field_name:ident;
{ $($field:ident: $type:ty),*}
) => (
struct $name {
paused: bool,
$conn_field_name: HashMap<String, $conn_type>,
$udp_conn_field_name: HashMap<String, $udp_conn_type>,
$( $field: $type ),*
}
impl TwsServiceState<$conn_type, $udp_conn_type> for $name {
#[inline(always)]
fn get_connections(&mut self) -> &mut HashMap<String, $conn_type> {
&mut self.$conn_field_name
}
#[inline(always)]
fn get_udp_connections(&mut self) -> &mut HashMap<String, $udp_conn_type> {
&mut self.$udp_conn_field_name
}
#[inline(always)]
fn get_paused(&self) -> bool {
self.paused
}
#[inline(always)]
fn set_paused(&mut self, paused: bool) {
self.paused = paused;
}
}
)
}
/*
* Throttle the TCP connection between
* 1. server and remote
* 2. local and client
* based on the state of the WebSocket stream.
* If the stream is congested, mark the corresponding
* TCP connections as paused.
*/
pub struct TwsTcpReadThrottler<C: TwsConnection, U: TwsUdpConnection, T: TwsServiceState<C, U>> {
_marker: PhantomData<C>,
_marker_2: PhantomData<U>,
state: Rc<RefCell<T>>,
pause_state: HashMap<String, bool>
}
impl<C, U, T> ThrottlingHandler for TwsTcpReadThrottler<C, U, T>
where C: TwsConnection,
U: TwsUdpConnection,
T: TwsServiceState<C, U>
{
fn pause(&mut self, max_speed: u64) {
let mut state = self.state.borrow_mut();
state.set_paused(true);
// Pause all UDP connections before pausing TCP ones
// Don't need anything special for UDP ones,
        // pausing just causes all packets to be lost
for (_, v) in state.get_udp_connections() {
v.get_handle().borrow_mut().pause();
}
        // The aggregated stream is saturated: pause the fastest substreams
        // first until the total speed drops below `max_speed`.
let mut it = state.get_connections().iter_mut()
.map(|(c, v)| {
let speed = v.get_speedometer().borrow().speed();
(c, v, speed)
})
.collect::<Vec<_>>();
        it.sort_by(|(_, _, s1), (_, _, s2)| s2.cmp(s1)); // fastest substreams first
let mut sum = 0;
for (_, _, s) in &it {
sum += *s;
}
for (c, v, s) in it {
            // Only streams that are not yet paused need to be paused here.
            if !*self.pause_state.get(c).unwrap_or(&false) {
v.pause();
self.pause_state.insert(c.clone(), true);
sum -= s;
if sum < max_speed {
break;
}
}
}
}
fn resume(&mut self) {
let mut state = self.state.borrow_mut();
state.set_paused(false);
// Resume all UDP connections before TCP ones
for (_, v) in state.get_udp_connections() {
v.get_handle().borrow_mut().resume();
}
// Resume only the paused streams
for (c, s) in &self.pause_state {
if *s {
state.get_connections().get_mut(c).map_or((), |s| s.resume());
}
}
self.pause_state.clear();
}
fn is_paused(&self) -> bool {
self.state.borrow().get_paused()
}
#[inline(always)]
fn allow_pause_multiple_times(&self) -> bool {
true
}
}
/*
* Shared logic abstracted from both the server and the client
* should be implemented only on structs
*/
pub trait TwsService<C: TwsConnection, U: TwsUdpConnection, T: TwsServiceState<C, U>, S: 'static + WsStream>: 'static + Sized {
/*
* Required fields simulated by required methods.
*/
fn get_passwd(&self) -> &str;
fn get_writer(&self) -> &SharedWriter<SplitSink<Client<S>>>;
fn get_heartbeat_agent(&self) -> &HeartbeatAgent<SplitSink<Client<S>>>;
fn get_logger(&self) -> &util::Logger;
fn get_state(&self) -> &Rc<RefCell<T>>;
fn get_udp_timeout(&self) -> u64;
/*
* Execute this service.
* More precisely, execute the WebSocket-related services.
* Receive from WebSocket and parse the TWS protocol packets.
* This will consume the ownership of self. Please spawn
* the returned future on an event loop.
*/
fn run_service<'a>(self, client: Client<S>) -> BoxFuture<'a, ()> {
let logger = self.get_logger().clone();
let state = self.get_state().clone();
let (sink, stream) = client.split();
self.get_writer().set_throttling_handler(TwsTcpReadThrottler {
_marker: PhantomData,
_marker_2: PhantomData,
state: self.get_state().clone(),
pause_state: HashMap::new()
});
// Obtain a future representing the writing tasks.
let sink_write = self.get_writer().run(sink).map_err(clone!(logger; |e| {
do_log!(logger, ERROR, "{:?}", e);
}));
// Obtain a future to do heartbeats.
let heartbeat_work = self.get_heartbeat_agent().run().map_err(clone!(logger; |_| {
do_log!(logger, ERROR, "Session timed out.");
}));
// UDP cleanup work
let udp_cleanup_work = tokio_timer::Interval::new(Instant::now(), Duration::from_millis(self.get_udp_timeout()))
.for_each(clone!(state; |_| {
for (_, conn) in state.borrow_mut().get_udp_connections() {
conn.get_handle().borrow().notify();
}
Ok(())
}));
// The main task
// 3 combined streams. Will finish once one of them finish.
// i.e. when the connection closes, everything here should finish.
util::AlternatingStream::new(stream)
.map_err(clone!(logger; |e| {
do_log!(logger, ERROR, "{:?}", e);
"session failed.".into()
}))
.for_each(move |msg| {
// Process each message from the client
// Note that this does not return a future.
// Anything that happens for processing
// the message and requires doing things
// on the event loop should spawn a task
// instead of returning it.
// In order not to block the main WebSocket
// stream.
self.on_message(msg);
Ok(()) as Result<()>
})
.select2(sink_write) // Execute task for the writer
.select2(heartbeat_work) // Execute task for heartbeats
.select2(udp_cleanup_work) // Execute task for cleaning up UDP connections
.then(clone!(logger; |_| {
do_log!(logger, INFO, "Session finished.");
// Clean-up job
// Drop all the connections
// will be closed by the implementation of Drop
//state.borrow_mut().remote_connections.clear();
Ok(())
}))
._box()
}
/*
* Process new WebSocket packets
*/
fn on_message(&self, msg: OwnedMessage) {
match msg {
// Control / Data packets can be in either Text or Binary form.
OwnedMessage::Text(text) => self.on_packet(proto::parse_packet(&self.get_passwd(), text.as_bytes())),
OwnedMessage::Binary(bytes) => self.on_packet(proto::parse_packet(&self.get_passwd(), &bytes)),
// Send pong back to keep connection alive
OwnedMessage::Ping(msg) => self.get_writer().feed(OwnedMessage::Pong(msg)),
// Notify the heartbeat agent that a pong is received.
OwnedMessage::Pong(_) => self.get_heartbeat_agent().set_heartbeat_received(),
OwnedMessage::Close(_) => self.get_writer().close()
};
}
/*
* Process TWS protocol packets
*/
fn on_packet(&self, packet: proto::Packet) {
//do_log!(self.get_logger(), DEBUG, "{:?}", packet);
match packet {
// Call corresponding event methods.
// Implementations can override these to control event handling.
proto::Packet::Handshake(addr) => self.on_handshake(addr),
proto::Packet::Connect(conn_id) => self.on_connect(conn_id),
proto::Packet::UdpConnect(conn_id) => self.on_udp_connect(conn_id),
proto::Packet::ConnectionState((conn_id, state)) => self.on_connect_state(conn_id, state),
proto::Packet::Data((conn_id, data)) => self.on_data(conn_id, data),
proto::Packet::UdpData((conn_id, data)) => self.on_udp_data(conn_id, data),
// Process unknown packets
_ => self.on_unknown()
}
}
/*
* Process `Pause` and `Resume` states
* which is identical between the client and the server
*
* Pause or resume the `read` part of the corresponding connection
* on request.
*/
fn _on_connect_state(&self, conn_id: &str, conn_state: &proto::ConnectionState) {
if conn_state.is_pause() {
self.get_state().borrow_mut().get_connections().get_mut(conn_id)
.map_or((), |c| c.pause());
} else if conn_state.is_resume() {
self.get_state().borrow_mut().get_connections().get_mut(conn_id)
.map_or((), |c| c.resume());
}
}
/*
* Overridable events
*/
fn on_unknown(&self) {}
fn on_handshake(&self, _addr: SocketAddr) {}
fn on_connect(&self, _conn_id: &str) {}
fn on_udp_connect(&self, _conn_id: &str) {}
fn on_connect_state(&self, _conn_id: &str, _state: proto::ConnectionState) {
// If this method is overridden, implementations must call self._on_connect_state()
self._on_connect_state(_conn_id, &_state);
}
fn on_data(&self, _conn_id: &str, _data: &[u8]) {}
fn on_udp_data(&self, _conn_id: &str, _data: &[u8]) {}
}
macro_rules! make_tws_service {
(
$name:ident;
$conn_type:ty, $udp_conn_type:ty, $state_type:ty, $stream_type:ty;
{ $($field:ident: $type:ty),*};
$(override fn $fn_name:ident (&$s:ident $(, $param:ident: $ptype:ty)*) -> $ret_type:ty $block:block)*
) => {
struct $name {
logger: util::Logger,
writer: SharedWriter<SplitSink<Client<$stream_type>>>,
heartbeat_agent: HeartbeatAgent<SplitSink<Client<$stream_type>>>,
state: Rc<RefCell<$state_type>>,
$( $field: $type ),*
}
impl TwsService<$conn_type, $udp_conn_type, $state_type, $stream_type> for $name {
#[inline(always)]
fn get_writer(&self) -> &SharedWriter<SplitSink<Client<$stream_type>>> {
&self.writer
}
#[inline(always)]
fn get_heartbeat_agent(&self) -> &HeartbeatAgent<SplitSink<Client<$stream_type>>> {
&self.heartbeat_agent
}
#[inline(always)]
fn get_logger(&self) -> &util::Logger {
&self.logger
}
#[inline(always)]
fn get_state(&self) -> &Rc<RefCell<$state_type>> {
&self.state
}
$(
fn $fn_name(&$s, $($param: $ptype),*) -> $ret_type { $block }
)*
}
};
}
// Splitted Sink for Tcp byte streams
pub type TcpSink = SplitSink<Framed<TcpStream, BytesCodec>>;
/*
* Handler of throttling events from the writing part of
* the TCP connection
* 1. from server to remote
* 2. from local to client
* and convert them into TWS Connection State messages
* to instruct the other side to block or resume
* the reading part.
*/
pub struct TwsTcpWriteThrottlingHandler<S: 'static + WsStream> {
conn_id: String,
ws_writer: SharedWriter<SplitSink<Client<S>>>,
paused: bool
}
impl<S: 'static + WsStream> ThrottlingHandler for TwsTcpWriteThrottlingHandler<S> {
fn pause(&mut self, _max_speed: u64) {
self.paused = true;
self.ws_writer.feed(OwnedMessage::Text(proto::connect_state_build(&self.conn_id, proto::ConnectionState::Pause)));
}
fn resume(&mut self) {
self.paused = false;
self.ws_writer.feed(OwnedMessage::Text(proto::connect_state_build(&self.conn_id, proto::ConnectionState::Resume)));
}
fn is_paused(&self) -> bool {
self.paused
}
}
/*
* Handler of data and close events emitted by
* TCP connections transmitted on TWS
* responsible for forwarding these events to remote
*/
pub trait TwsConnectionHandler: 'static + Sized {
fn on_data(&self, _d: BytesMut);
fn on_close(&self);
}
/*
* Shared logic for TCP connection
* 1. from server to remote
* 2. from client to local (which is TWS client)
*/
pub trait TwsConnection: 'static + Sized {
#[inline(always)]
fn get_endpoint_descriptors() -> (&'static str, &'static str) {
("remote", "client")
}
/*
* Static method to bootstrap the connection
* set up the reading and writing part of the connection
*/
fn create<S: 'static + WsStream, H: TwsConnectionHandler>(
conn_id: String, logger: util::Logger,
client: TcpStream, ws_writer: SharedWriter<SplitSink<Client<S>>>,
conn_handler: H
) -> (SharedSpeedometer, SharedWriter<TcpSink>, StreamThrottler) {
let (a, b) = Self::get_endpoint_descriptors();
let conn_handler = Rc::new(conn_handler);
let speedometer = Rc::new(RefCell::new(Speedometer::new()));
let read_throttler = StreamThrottler::new();
let (sink, stream) = BytesCodec::new().framed(client).split();
// SharedWriter for sending to remote
let remote_writer = SharedWriter::new();
remote_writer.set_throttling_handler(TwsTcpWriteThrottlingHandler {
conn_id: conn_id.clone(),
ws_writer,
paused: false
});
// Forward remote packets to client
let stream_work = read_throttler.wrap_stream(util::AlternatingStream::new(stream)).for_each(clone!(conn_handler, speedometer; |p| {
// Calculate the speed of the TCP stream (read part)
speedometer.borrow_mut().feed_counter(p.len() as u64);
// Forward
conn_handler.on_data(p);
Ok(())
})).map_err(clone!(a, b, logger, conn_id; |e| {
do_log!(logger, ERROR, "[{}] {} => {} error {:?}", conn_id, a, b, e);
})).map(|_| ());
// Forward client packets to remote
// Client packets should be sent through `send` method.
let sink_work = remote_writer.run(sink)
.map_err(clone!(a, b, logger, conn_id; |e| {
do_log!(logger, ERROR, "[{}] {} => {} error {:?}", conn_id, b, a, e);
}));
// Schedule the two jobs on the event loop
// Use `select` to wait one of the jobs to finish.
// This is often the `sink_work` if no error on remote side
// has happened.
// Once one of them is finished, just tear down the whole
// channel.
current_thread::spawn(stream_work.select(sink_work)
.then(clone!(logger, conn_id, conn_handler; |_| {
// Clean-up job upon finishing
// No matter if there is any error.
do_log!(logger, INFO, "[{}] Channel closing.", conn_id);
conn_handler.on_close();
Ok(())
})));
(speedometer, remote_writer, read_throttler)
}
fn get_writer(&self) -> &SharedWriter<TcpSink>;
fn get_conn_id(&self) -> &str;
fn get_logger(&self) -> &util::Logger;
fn get_speedometer(&self) -> &SharedSpeedometer;
fn get_read_throttler(&mut self) -> &mut StreamThrottler;
fn get_read_pause_counter(&self) -> usize;
fn set_read_pause_counter(&mut self, counter: usize);
fn close(&self) {
self.get_writer().close();
}
/*
* Pause the reading part if it is not paused yet
*/
fn pause(&mut self) {
let counter = self.get_read_pause_counter();
if counter == 0 {
self.get_read_throttler().pause(0);
}
self.set_read_pause_counter(counter + 1);
}
/*
* Resume the reading part if no one requires it
* to be paused
*/
fn resume(&mut self) {
let counter = self.get_read_pause_counter();
if counter == 1 {
self.get_read_throttler().resume();
}
if counter > 0 {
self.set_read_pause_counter(counter - 1);
}
}
}
macro_rules! make_tws_connection {
(
$name:ident; $writer_name:ident;
($endpoint_1:expr, $endpoint_2:expr)
) => (
struct $name {
conn_id: String,
logger: util::Logger,
$writer_name: SharedWriter<TcpSink>,
read_throttler: StreamThrottler,
speedometer: SharedSpeedometer,
read_pause_counter: usize
}
impl TwsConnection for $name {
#[inline(always)]
fn get_endpoint_descriptors() -> (&'static str, &'static str) {
($endpoint_1, $endpoint_2)
}
#[inline(always)]
fn get_logger(&self) -> &util::Logger {
&self.logger
}
#[inline(always)]
fn get_conn_id(&self) -> &str {
&self.conn_id
}
#[inline(always)]
fn get_writer(&self) -> &SharedWriter<TcpSink> {
&self.$writer_name
}
#[inline(always)]
fn get_read_throttler(&mut self) -> &mut StreamThrottler {
&mut self.read_throttler
}
#[inline(always)]
fn get_read_pause_counter(&self) -> usize {
self.read_pause_counter
}
#[inline(always)]
fn get_speedometer(&self) -> &SharedSpeedometer {
&self.speedometer
}
#[inline(always)]
fn set_read_pause_counter(&mut self, counter: usize) {
self.read_pause_counter = counter;
}
}
impl Drop for $name {
fn drop(&mut self) {
/*
* Close immediately on drop.
*/
self.close();
}
}
)
}
pub trait TwsUdpConnectionHandler: 'static + Sized {
fn on_data(&self, _d: Vec<u8>);
fn on_close(&self);
}
pub trait TwsUdpConnection: 'static + Sized {
fn create<H: TwsUdpConnectionHandler>(
conn_id: String, logger: util::Logger,
client: UdpStream, handler: H
) {
let handler = Rc::new(handler);
let stream_work = util::AlternatingStream::new(client).for_each(clone!(handler; |data| {
handler.on_data(data);
Ok(())
}));
current_thread::spawn(stream_work.then(clone!(conn_id, logger, handler; |_| {
do_log!(logger, INFO, "[{}] UDP connection idle. Closing.", conn_id);
handler.on_close();
Ok(())
})))
}
fn get_handle(&self) -> &SharedUdpHandle;
fn pause(&self) {
self.get_handle().borrow_mut().pause();
}
fn resume(&self) {
self.get_handle().borrow_mut().resume();
}
}
macro_rules! make_tws_udp_connection {
(
$name:ident;
{ $($field:ident: $type:ty),*}
) => (
#[allow(dead_code)]
struct $name {
conn_id: String,
handle: SharedUdpHandle,
logger: util::Logger,
$( $field: $type ),*
}
impl TwsUdpConnection for $name {
#[inline(always)]
fn get_handle(&self) -> &SharedUdpHandle {
&self.handle
}
}
)
} | 33.437304 | 139 | 0.566962 |
2f1dc24fabab6ab9b9fc00ee4c94fc1358e8ef3e | 20,733 | use crate::physics::RigidBody;
use crate::scene::commands::physics::MoveRigidBodyCommand;
use crate::world::physics::selection::RigidBodySelection;
use crate::{
camera::CameraController,
interaction::{
calculate_gizmo_distance_scaling, gizmo::move_gizmo::MoveGizmo, plane::PlaneKind,
InteractionMode,
},
scene::{
commands::{
graph::MoveNodeCommand, sound::MoveSpatialSoundSourceCommand, ChangeSelectionCommand,
CommandGroup, SceneCommand,
},
EditorScene, Selection,
},
settings::Settings,
world::{graph::selection::GraphSelection, sound::selection::SoundSelection},
GameEngine, Message,
};
use rg3d::{
core::{
algebra::{Matrix4, Point3, Vector2, Vector3},
math::plane::Plane,
pool::Handle,
},
scene::{graph::Graph, node::Node, Scene},
sound::source::SoundSource,
};
use std::sync::mpsc::Sender;
#[derive(Copy, Clone)]
enum MovableEntity {
Node(Handle<Node>),
Sound(Handle<SoundSource>),
RigidBody(Handle<RigidBody>),
}
impl MovableEntity {
fn position(&self, scene: &Scene, editor_scene: &EditorScene) -> Vector3<f32> {
match *self {
MovableEntity::Node(node) => **scene.graph[node].local_transform().position(),
MovableEntity::Sound(sound) => {
let state = scene.sound_context.state();
match state.source(sound) {
SoundSource::Generic(_) => Vector3::default(),
SoundSource::Spatial(spatial) => spatial.position(),
}
}
MovableEntity::RigidBody(rigid_body) => {
editor_scene.physics.bodies[rigid_body].position
}
}
}
fn set_position(
&self,
scene: &mut Scene,
editor_scene: &mut EditorScene,
position: Vector3<f32>,
) {
match *self {
MovableEntity::Node(node) => {
scene.graph[node]
.local_transform_mut()
.set_position(position);
}
MovableEntity::Sound(sound) => {
let mut state = scene.sound_context.state();
if let SoundSource::Spatial(spatial) = state.source_mut(sound) {
spatial.set_position(position);
}
}
MovableEntity::RigidBody(rigid_body) => {
editor_scene.physics.bodies[rigid_body].position = position
}
}
}
}
struct Entry {
entity: MovableEntity,
initial_offset_gizmo_space: Vector3<f32>,
initial_local_position: Vector3<f32>,
initial_parent_inv_global_transform: Matrix4<f32>,
new_local_position: Vector3<f32>,
}
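/// Captures the state needed to translate mouse movement on the picked gizmo
/// plane into new local positions for every selected entity.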
struct MoveContext {
plane: Plane,
objects: Vec<Entry>,
plane_kind: PlaneKind,
gizmo_inv_transform: Matrix4<f32>,
gizmo_local_transform: Matrix4<f32>,
}
impl MoveContext {
pub fn from_filler<F>(
scene: &Scene,
move_gizmo: &MoveGizmo,
camera_controller: &CameraController,
plane_kind: PlaneKind,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
mut fill: F,
) -> Self
where
F: FnMut(Vector3<f32>, Matrix4<f32>, Vector3<f32>) -> Vec<Entry>,
{
let graph = &scene.graph;
let gizmo_origin = &graph[move_gizmo.origin];
let gizmo_inv_transform = gizmo_origin
.global_transform()
.try_inverse()
.unwrap_or_default();
let look_direction =
gizmo_inv_transform.transform_vector(&graph[camera_controller.camera].look_vector());
let plane = plane_kind.make_plane_from_view(look_direction);
let plane_point = plane_kind.project_point(
camera_controller
.pick_on_plane(plane, graph, mouse_pos, frame_size, gizmo_inv_transform)
.unwrap_or_default(),
);
Self {
plane,
objects: fill(
plane_point,
gizmo_inv_transform,
gizmo_origin.global_position(),
),
gizmo_local_transform: gizmo_origin.local_transform().matrix(),
gizmo_inv_transform,
plane_kind,
}
}
pub fn from_graph_selection(
selection: &GraphSelection,
scene: &Scene,
move_gizmo: &MoveGizmo,
camera_controller: &CameraController,
plane_kind: PlaneKind,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
) -> Self {
Self::from_filler(
scene,
move_gizmo,
camera_controller,
plane_kind,
mouse_pos,
frame_size,
|plane_point, gizmo_inv_transform, gizmo_origin| {
let graph = &scene.graph;
selection
.root_nodes(graph)
.iter()
.map(|&node_handle| {
let node = &graph[node_handle];
Entry {
entity: MovableEntity::Node(node_handle),
initial_offset_gizmo_space: gizmo_inv_transform
.transform_point(&Point3::from(node.global_position()))
.coords
- plane_point
- gizmo_inv_transform
.transform_vector(&(node.global_position() - gizmo_origin)),
new_local_position: **node.local_transform().position(),
initial_local_position: **node.local_transform().position(),
initial_parent_inv_global_transform: if node.parent().is_some() {
graph[node.parent()]
.global_transform()
.try_inverse()
.unwrap_or_default()
} else {
Matrix4::identity()
},
}
})
.collect()
},
)
}
pub fn from_sound_selection(
selection: &SoundSelection,
scene: &Scene,
move_gizmo: &MoveGizmo,
camera_controller: &CameraController,
plane_kind: PlaneKind,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
) -> Self {
let state = scene.sound_context.state();
Self::from_filler(
scene,
move_gizmo,
camera_controller,
plane_kind,
mouse_pos,
frame_size,
|plane_point, gizmo_inv_transform, gizmo_origin| {
selection
.sources()
.iter()
.map(|&source_handle| {
let source = state.source(source_handle);
match source {
SoundSource::Generic(_) => None,
SoundSource::Spatial(spatial) => Some(Entry {
entity: MovableEntity::Sound(source_handle),
initial_offset_gizmo_space: gizmo_inv_transform
.transform_point(&Point3::from(spatial.position()))
.coords
- plane_point
- gizmo_inv_transform
.transform_vector(&(spatial.position() - gizmo_origin)),
new_local_position: spatial.position(),
initial_local_position: spatial.position(),
initial_parent_inv_global_transform: Matrix4::identity(),
}),
}
})
.flatten()
.collect()
},
)
}
pub fn from_rigid_body_selection(
selection: &RigidBodySelection,
scene: &Scene,
editor_scene: &EditorScene,
move_gizmo: &MoveGizmo,
camera_controller: &CameraController,
plane_kind: PlaneKind,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
) -> Self {
Self::from_filler(
scene,
move_gizmo,
camera_controller,
plane_kind,
mouse_pos,
frame_size,
|plane_point, gizmo_inv_transform, gizmo_origin| {
selection
.bodies()
.iter()
.map(|&rigid_body_handle| {
let rigid_body = &editor_scene.physics.bodies[rigid_body_handle];
Some(Entry {
entity: MovableEntity::RigidBody(rigid_body_handle),
initial_offset_gizmo_space: gizmo_inv_transform
.transform_point(&Point3::from(rigid_body.position))
.coords
- plane_point
- gizmo_inv_transform
.transform_vector(&(rigid_body.position - gizmo_origin)),
new_local_position: rigid_body.position,
initial_local_position: rigid_body.position,
initial_parent_inv_global_transform: Matrix4::identity(),
})
})
.flatten()
.collect()
},
)
}
pub fn update(
&mut self,
graph: &Graph,
camera_controller: &CameraController,
settings: &Settings,
mouse_position: Vector2<f32>,
frame_size: Vector2<f32>,
) {
if let Some(picked_position_gizmo_space) = camera_controller
.pick_on_plane(
self.plane,
graph,
mouse_position,
frame_size,
self.gizmo_inv_transform,
)
.map(|p| self.plane_kind.project_point(p))
{
for entry in self.objects.iter_mut() {
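                // Rebuild the entity position from the picked plane point:
                // apply the stored gizmo-space offset, map through the gizmo's
                // local transform, then into the entity's parent space.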
let mut new_local_position = entry.initial_local_position
+ entry.initial_parent_inv_global_transform.transform_vector(
&self.gizmo_local_transform.transform_vector(
&(picked_position_gizmo_space + entry.initial_offset_gizmo_space),
),
);
// Snap to grid if needed.
if settings.move_mode_settings.grid_snapping {
fn round_to_step(x: f32, step: f32) -> f32 {
x - x % step
}
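                    // e.g. round_to_step(3.7, 0.5) drops the 0.2 remainder and
                    // yields 3.5, so values snap toward zero on the grid.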
new_local_position = Vector3::new(
round_to_step(
new_local_position.x,
settings.move_mode_settings.x_snap_step,
),
round_to_step(
new_local_position.y,
settings.move_mode_settings.y_snap_step,
),
round_to_step(
new_local_position.z,
settings.move_mode_settings.z_snap_step,
),
);
}
entry.new_local_position = new_local_position;
}
}
}
}
pub struct MoveInteractionMode {
move_context: Option<MoveContext>,
move_gizmo: MoveGizmo,
message_sender: Sender<Message>,
}
impl MoveInteractionMode {
pub fn new(
editor_scene: &EditorScene,
engine: &mut GameEngine,
message_sender: Sender<Message>,
) -> Self {
Self {
move_context: None,
move_gizmo: MoveGizmo::new(editor_scene, engine),
message_sender,
}
}
}
impl InteractionMode for MoveInteractionMode {
fn on_left_mouse_button_down(
&mut self,
editor_scene: &mut EditorScene,
engine: &mut GameEngine,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
) {
let scene = &mut engine.scenes[editor_scene.scene];
let graph = &mut scene.graph;
let camera = editor_scene.camera_controller.camera;
let camera_pivot = editor_scene.camera_controller.pivot;
if let Some(result) = editor_scene.camera_controller.pick(
mouse_pos,
graph,
editor_scene.root,
frame_size,
true,
|handle, _| {
handle != camera && handle != camera_pivot && handle != self.move_gizmo.origin
},
) {
if let Some(plane_kind) = self.move_gizmo.handle_pick(result.node, graph) {
match &editor_scene.selection {
Selection::Graph(selection) => {
self.move_context = Some(MoveContext::from_graph_selection(
selection,
scene,
&self.move_gizmo,
&editor_scene.camera_controller,
plane_kind,
mouse_pos,
frame_size,
));
}
Selection::Sound(selection) => {
self.move_context = Some(MoveContext::from_sound_selection(
selection,
scene,
&self.move_gizmo,
&editor_scene.camera_controller,
plane_kind,
mouse_pos,
frame_size,
));
}
Selection::RigidBody(selection) => {
self.move_context = Some(MoveContext::from_rigid_body_selection(
selection,
scene,
editor_scene,
&self.move_gizmo,
&editor_scene.camera_controller,
plane_kind,
mouse_pos,
frame_size,
))
}
_ => {}
}
}
}
}
fn on_left_mouse_button_up(
&mut self,
editor_scene: &mut EditorScene,
engine: &mut GameEngine,
mouse_pos: Vector2<f32>,
frame_size: Vector2<f32>,
) {
let scene = &mut engine.scenes[editor_scene.scene];
if let Some(move_context) = self.move_context.take() {
let mut changed = false;
for initial_state in move_context.objects.iter() {
if initial_state.entity.position(scene, editor_scene)
!= initial_state.initial_local_position
{
changed = true;
break;
}
}
if changed {
let commands = CommandGroup::from(
move_context
.objects
.iter()
.map(|initial_state| match initial_state.entity {
MovableEntity::Node(node) => {
Some(SceneCommand::new(MoveNodeCommand::new(
node,
initial_state.initial_local_position,
**scene.graph[node].local_transform().position(),
)))
}
MovableEntity::Sound(sound) => {
let state = scene.sound_context.state();
match state.source(sound) {
SoundSource::Generic(_) => None,
SoundSource::Spatial(spatial) => {
Some(SceneCommand::new(MoveSpatialSoundSourceCommand::new(
sound,
initial_state.initial_local_position,
spatial.position(),
)))
}
}
}
MovableEntity::RigidBody(rigid_body) => {
Some(SceneCommand::new(MoveRigidBodyCommand::new(
rigid_body,
initial_state.initial_local_position,
editor_scene.physics.bodies[rigid_body].position,
)))
}
})
.flatten()
.collect::<Vec<_>>(),
);
// Commit changes.
self.message_sender
.send(Message::DoSceneCommand(SceneCommand::new(commands)))
.unwrap();
}
} else {
let new_selection = editor_scene
.camera_controller
.pick(
mouse_pos,
&scene.graph,
editor_scene.root,
frame_size,
false,
|_, _| true,
)
.map(|result| {
if let (Selection::Graph(selection), true) = (
&editor_scene.selection,
engine.user_interface.keyboard_modifiers().control,
) {
let mut selection = selection.clone();
selection.insert_or_exclude(result.node);
Selection::Graph(selection)
} else {
Selection::Graph(GraphSelection::single_or_empty(result.node))
}
})
.unwrap_or_else(|| Selection::Graph(GraphSelection::default()));
if new_selection != editor_scene.selection {
self.message_sender
.send(Message::do_scene_command(ChangeSelectionCommand::new(
new_selection,
editor_scene.selection.clone(),
)))
.unwrap();
}
}
}
fn on_mouse_move(
&mut self,
_mouse_offset: Vector2<f32>,
mouse_position: Vector2<f32>,
_camera: Handle<Node>,
editor_scene: &mut EditorScene,
engine: &mut GameEngine,
frame_size: Vector2<f32>,
settings: &Settings,
) {
if let Some(move_context) = self.move_context.as_mut() {
let scene = &mut engine.scenes[editor_scene.scene];
let graph = &mut scene.graph;
move_context.update(
graph,
&editor_scene.camera_controller,
settings,
mouse_position,
frame_size,
);
for entry in move_context.objects.iter() {
entry
.entity
.set_position(scene, editor_scene, entry.new_local_position);
}
}
}
fn update(
&mut self,
editor_scene: &mut EditorScene,
camera: Handle<Node>,
engine: &mut GameEngine,
) {
let scene = &mut engine.scenes[editor_scene.scene];
let graph = &mut scene.graph;
if !editor_scene.selection.is_empty() {
let scale = calculate_gizmo_distance_scaling(graph, camera, self.move_gizmo.origin);
self.move_gizmo.set_visible(graph, true);
self.move_gizmo.sync_transform(
scene,
&editor_scene.selection,
&editor_scene.physics,
scale,
);
} else {
self.move_gizmo.set_visible(graph, false);
}
}
fn deactivate(&mut self, editor_scene: &EditorScene, engine: &mut GameEngine) {
let graph = &mut engine.scenes[editor_scene.scene].graph;
self.move_gizmo.set_visible(graph, false);
}
}
| 36.120209 | 98 | 0.470168 |
896051a47341dd1efbd1f2573d09f97edc47bb8c | 12,619 | //! Proxy server main process
//!
//! # Server workflow summary
//!
//! ```text
//! Client Acceptor Server External Service
//! | | | |
//! | | | |
//! |----------->| | |
//! |connect(2) x accept(2) | |
//! | |------------>| |
//! | |Connect | |
//! | | | |
//! | | x Session::new |
//! . . . |
//! . . . |
//! | | |
//! | [ establish connection ] | |
//! | - authorize | |
//! | - filtering | |
//! . . |
//! . . |
//! | | incoming outgoing |
//! | | relay relay |
//! | x -----------x--------> x |
//! | |spawn_relay | | |
//! | . | | |
//! | . | | |
//! | | | |
//!
//! [ repeat ] | | |
//! |------------------------------------------------->| |
//! |write(2) | |-------->|
//! | | |write(2) |
//! | | | |
//! | | |<--------|
//! |<-------------------------------------------------|read(2) |
//! |read(2) | | |
//! | | | |
//!
//! [ alt ]
//! [ relay completed ] | |
//! | . | |
//! | . . |
//! | | . |
//! | | x complete .
//! | | .
//! | |<----------------------x complete
//! | | Disconnect
//! | |
//!
//! [ alt ]
//! [ abort relay ] | | |
//! | x recv Terminate |
//! | | | |
//! | |----------->| |
//! | |send(()) x |
//! | | |
//! | |---------------------->|
//! | |send(()) |
//! | | |
//! | |<----------------------x
//! | | Disconnect
//! | |
//! ```
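//!
//! # Usage sketch
//!
//! A minimal way to drive the server (mirroring the tests below; this binds a
//! real TCP socket, so treat it as illustrative):
//!
//! ```text
//! let (mut server, tx) = Server::new(ServerConfig::default());
//! // from another thread: tx.send(ServerCommand::Terminate).unwrap();
//! server.serve().unwrap();
//! ```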
use std::collections::HashMap;
use std::net::TcpStream;
use std::sync::{
mpsc::{self, Receiver, Sender, SyncSender},
Arc, Mutex,
};
use std::thread;
use log::*;
use rand::prelude::*;
use crate::acceptor::{Binder, TcpBinder};
use crate::auth_service::{AuthService, NoAuthService};
use crate::byte_stream::ByteStream;
use crate::config::ServerConfig;
use crate::connector::{Connector, TcpUdpConnector};
use crate::error::Error;
use crate::model::{ProtocolVersion, SocketAddr};
use crate::server_command::ServerCommand;
use crate::session::{Session, SessionHandle, SessionId};
use crate::thread::spawn_thread;
pub struct Server<S, T, C> {
config: ServerConfig,
tx_cmd: Sender<ServerCommand<S>>,
rx_cmd: Receiver<ServerCommand<S>>,
    /// binds the server address
binder: T,
    /// sends a termination message to the acceptor
tx_acceptor_done: SyncSender<()>,
    /// makes connections to the service host
connector: C,
protocol_version: ProtocolVersion,
session: HashMap<SessionId, SessionHandle>,
/// random context for generating SessionIds
id_rng: StdRng,
}
/// Spawns a thread that sends each accepted stream to `tx`.
fn spawn_acceptor<S>(
acceptor: impl Iterator<Item = (S, SocketAddr)> + Send + 'static,
tx: Sender<ServerCommand<S>>,
) -> Result<thread::JoinHandle<()>, Error>
where
S: ByteStream + 'static,
{
use ServerCommand::*;
Ok(spawn_thread("acceptor", move || {
for (strm, addr) in acceptor {
if tx.send(Connect(strm, addr)).is_err() {
info!("disconnected ServerCommand chan");
break;
}
}
})?)
}
/// Spawns a thread that runs `Session::start`.
///
/// - *session*
/// Session to spawn.
/// - *tx*
/// Sender of session termination message.
/// - *addr*
/// Address of the client connects to this server.
/// - *strm*
/// Established connection between a client and this server.
fn spawn_session<S, D, M>(
session: Session<D, M, S>,
tx: SyncSender<()>,
addr: SocketAddr,
strm: S,
) -> SessionHandle
where
S: ByteStream + 'static,
D: Connector + 'static,
M: AuthService + 'static,
{
let session_th = spawn_thread(&format!("{}: {}", session.id, addr), move || {
session.start(addr, strm)
})
.unwrap();
SessionHandle::new(addr, session_th, tx)
}
impl Server<TcpStream, TcpBinder, TcpUdpConnector> {
pub fn new(config: ServerConfig) -> (Self, mpsc::Sender<ServerCommand<TcpStream>>) {
let (tx_done, rx_done) = mpsc::sync_channel(1);
Server::<TcpStream, TcpBinder, TcpUdpConnector>::with_binder(
config.clone(),
TcpBinder::new(
config.client_rw_timeout,
Arc::new(Mutex::new(rx_done)),
config.accept_timeout,
),
tx_done,
TcpUdpConnector::new(config.server_rw_timeout),
)
}
}
impl<S, T, C> Server<S, T, C>
where
S: ByteStream + 'static,
T: Binder<Stream = S>,
C: Connector + Clone + 'static,
{
pub fn with_binder(
config: ServerConfig,
binder: T,
tx_acceptor_done: SyncSender<()>,
connector: C,
) -> (Self, Sender<ServerCommand<S>>) {
let (tx, rx) = mpsc::channel();
(
Self {
config,
tx_cmd: tx.clone(),
rx_cmd: rx,
binder,
tx_acceptor_done,
connector,
protocol_version: ProtocolVersion::from(5),
session: HashMap::new(),
id_rng: StdRng::from_entropy(),
},
tx,
)
}
fn next_session_id(&mut self) -> SessionId {
loop {
let next_candidate = self.id_rng.next_u32().into();
if self.session.contains_key(&next_candidate) {
continue;
}
debug!("next session id is issued: {}", next_candidate);
return next_candidate;
}
}
/// Server main loop
pub fn serve(&mut self) -> Result<(), Error> {
let acceptor = self.binder.bind(self.config.server_addr())?;
let accept_th = spawn_acceptor(acceptor, self.tx_cmd.clone())?;
while let Ok(cmd) = self.rx_cmd.recv() {
use ServerCommand::*;
info!("cmd: {:?}", cmd);
match cmd {
Terminate => {
self.tx_acceptor_done.send(()).ok();
self.session.iter().for_each(|(_, ss)| ss.stop());
self.session.drain().for_each(|(_, ss)| {
ss.join().ok();
});
debug!("join accept thread");
accept_th.join().ok();
break;
}
Connect(stream, addr) => {
let (session, tx) = Session::new(
self.next_session_id(),
self.protocol_version,
self.connector.clone(),
NoAuthService::new(),
self.config.server_addr(),
self.config.connect_rule(),
self.tx_cmd.clone(),
);
self.session
.insert(session.id, spawn_session(session, tx, addr, stream));
}
Disconnect(id) => {
if let Some(session) = self.session.remove(&id) {
let addr = session.client_addr();
session.stop();
match session.join() {
Ok(Ok(())) => info!("session is stopped: {}: {}", addr, id),
Ok(Err(err)) => error!("session error: {}: {}: {}", addr, id, err),
Err(err) => error!("session panic: {}: {}: {:?}", addr, id, err),
}
} else {
error!("session has already been stopped: {}", id);
}
}
}
}
info!("server shutdown");
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::acceptor::{Binder, TcpBinder};
use crate::byte_stream::test::*;
use crate::config::*;
use crate::connector::*;
use crate::model;
use std::borrow::Cow;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime};
#[test]
fn server_shutdown() {
let config = ServerConfig::default();
let (tx_done, rx_done) = mpsc::sync_channel(1);
let (mut server, tx) = Server::with_binder(
config,
TcpBinder::new(
None,
Arc::new(Mutex::new(rx_done)),
Some(Duration::from_secs(3)),
),
tx_done,
TcpUdpConnector::new(None),
);
let req_shutdown = Arc::new(Mutex::new(SystemTime::now()));
let th = {
let req_shutdown = req_shutdown.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
*req_shutdown.lock().unwrap() = SystemTime::now();
tx.send(ServerCommand::Terminate).unwrap();
})
};
server.serve().ok();
let shutdown = SystemTime::now();
th.join().unwrap();
assert!(&shutdown > req_shutdown.lock().unwrap().deref());
}
struct DummyBinder {
stream: BufferStream,
src_addr: SocketAddr,
}
impl Binder for DummyBinder {
type Stream = BufferStream;
type Iter = std::iter::Once<(Self::Stream, SocketAddr)>;
fn bind(&self, addr: SocketAddr) -> Result<Self::Iter, model::Error> {
println!("bind: {}", addr);
Ok(std::iter::once((self.stream.clone(), self.src_addr)))
}
}
#[test]
fn dummy_binder() {
let binder = DummyBinder {
stream: BufferStream::with_buffer(
Cow::from(b"dummy read".to_vec()),
Cow::from(b"dummy write".to_vec()),
),
src_addr: "127.0.0.1:1080".parse().unwrap(),
};
let tx = Arc::new(Mutex::new(None));
let th = {
let tx = tx.clone();
thread::spawn(move || {
let (tx_done, _rx_done) = mpsc::sync_channel(1);
let (mut server, stx) = Server::with_binder(
ServerConfig::default(),
binder,
tx_done,
TcpUdpConnector::new(None),
);
*tx.lock().unwrap() = Some(stx);
server.serve().ok();
})
};
thread::sleep(Duration::from_secs(1));
tx.lock()
.unwrap()
.as_ref()
.unwrap()
.send(ServerCommand::Terminate)
.unwrap();
th.join().unwrap();
}
}
| 35.446629 | 95 | 0.3845 |
abea425392e8cc760bb63a1fc7928e90e7c5a434 | 7,342 | use crate::codegen::accounts::{constraints, generics};
use crate::{AccountField, AccountsStruct, Field, SysvarTy, Ty};
use proc_macro2::TokenStream;
use quote::quote;
// Generates the `Accounts` trait implementation.
pub fn generate(accs: &AccountsStruct) -> proc_macro2::TokenStream {
let name = &accs.ident;
let (combined_generics, trait_generics, strct_generics) = generics(accs);
// All fields without an `#[account(associated)]` attribute.
let non_associated_fields: Vec<&AccountField> = accs
.fields
.iter()
.filter(|af| !is_associated_init(af))
.collect();
// Deserialization for each field
let deser_fields: Vec<proc_macro2::TokenStream> = accs
.fields
.iter()
.map(|af: &AccountField| {
match af {
AccountField::CompositeField(s) => {
let name = &s.ident;
let ty = &s.raw_field.ty;
quote! {
#[cfg(feature = "anchor-debug")]
::solana_program::log::sol_log(stringify!(#name));
let #name: #ty = anchor_lang::Accounts::try_accounts(program_id, accounts)?;
}
}
AccountField::Field(f) => {
// Associated fields are *first* deserialized into
// AccountInfos, and then later deserialized into
// ProgramAccounts in the "constraint check" phase.
if is_associated_init(af) {
let name = &f.ident;
quote!{
let #name = &accounts[0];
*accounts = &accounts[1..];
}
} else {
let name = typed_ident(&f);
match f.constraints.is_init() {
false => quote! {
#[cfg(feature = "anchor-debug")]
::solana_program::log::sol_log(stringify!(#name));
let #name = anchor_lang::Accounts::try_accounts(program_id, accounts)?;
},
true => quote! {
#[cfg(feature = "anchor-debug")]
::solana_program::log::sol_log(stringify!(#name));
let #name = anchor_lang::AccountsInit::try_accounts_init(program_id, accounts)?;
},
}
}
}
}
})
.collect();
// Deserialization for each *associated* field. This must be after
// the deser_fields.
let deser_associated_fields: Vec<proc_macro2::TokenStream> = accs
.fields
.iter()
.filter_map(|af| match af {
AccountField::CompositeField(_s) => None,
AccountField::Field(f) => match is_associated_init(af) {
false => None,
true => Some(f),
},
})
.map(|field: &Field| constraints::generate(field))
.collect();
// Constraint checks for each account fields.
let access_checks: Vec<proc_macro2::TokenStream> = non_associated_fields
.iter()
.map(|af: &&AccountField| match af {
AccountField::Field(f) => constraints::generate(f),
AccountField::CompositeField(s) => constraints::generate_composite(s),
})
.collect();
// Each field in the final deserialized accounts struct.
let return_tys: Vec<proc_macro2::TokenStream> = accs
.fields
.iter()
.map(|f: &AccountField| {
let name = match f {
AccountField::CompositeField(s) => &s.ident,
AccountField::Field(f) => &f.ident,
};
quote! {
#name
}
})
.collect();
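    // The emitted impl runs in three phases: plain deserialization in
    // declaration order, associated-account init (deserialization and
    // constraint checks in a single step), then the remaining constraint
    // checks. For a hypothetical `struct Foo<'info> { authority:
    // AccountInfo<'info> }` the output is roughly (illustrative, not
    // verbatim):
    //
    //     impl<'info> anchor_lang::Accounts<'info> for Foo<'info> {
    //         fn try_accounts(program_id, accounts) -> Result<Self, ProgramError> {
    //             let authority = anchor_lang::Accounts::try_accounts(program_id, accounts)?;
    //             Ok(Foo { authority })
    //         }
    //     }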
quote! {
impl#combined_generics anchor_lang::Accounts#trait_generics for #name#strct_generics {
#[inline(never)]
fn try_accounts(
program_id: &anchor_lang::solana_program::pubkey::Pubkey,
accounts: &mut &[anchor_lang::solana_program::account_info::AccountInfo<'info>],
) -> std::result::Result<Self, anchor_lang::solana_program::program_error::ProgramError> {
// Deserialize each account.
#(#deser_fields)*
// Deserialize each associated account.
//
// Associated accounts are treated specially, because the fields
// do deserialization + constraint checks in a single go,
// whereas all other fields, i.e. the `deser_fields`, first
// deserialize, and then do constraint checks.
#(#deser_associated_fields)*
// Perform constraint checks on each account.
#(#access_checks)*
// Success. Return the validated accounts.
Ok(#name {
#(#return_tys),*
})
}
}
}
}
// Returns true if the given AccountField has an associated init constraint.
fn is_associated_init(af: &AccountField) -> bool {
match af {
AccountField::CompositeField(_s) => false,
AccountField::Field(f) => f
.constraints
.associated
.as_ref()
.map(|f| f.is_init)
.unwrap_or(false),
}
}
fn typed_ident(field: &Field) -> TokenStream {
let name = &field.ident;
let ty = match &field.ty {
Ty::AccountInfo => quote! { AccountInfo },
Ty::ProgramState(ty) => {
let account = &ty.account_ident;
quote! {
ProgramState<#account>
}
}
Ty::CpiState(ty) => {
let account = &ty.account_ident;
quote! {
CpiState<#account>
}
}
Ty::ProgramAccount(ty) => {
let account = &ty.account_ident;
quote! {
ProgramAccount<#account>
}
}
Ty::Loader(ty) => {
let account = &ty.account_ident;
quote! {
Loader<#account>
}
}
Ty::CpiAccount(ty) => {
let account = &ty.account_ident;
quote! {
CpiAccount<#account>
}
}
Ty::Sysvar(ty) => {
let account = match ty {
SysvarTy::Clock => quote! {Clock},
SysvarTy::Rent => quote! {Rent},
SysvarTy::EpochSchedule => quote! {EpochSchedule},
SysvarTy::Fees => quote! {Fees},
SysvarTy::RecentBlockhashes => quote! {RecentBlockhashes},
SysvarTy::SlotHashes => quote! {SlotHashes},
SysvarTy::SlotHistory => quote! {SlotHistory},
SysvarTy::StakeHistory => quote! {StakeHistory},
SysvarTy::Instructions => quote! {Instructions},
SysvarTy::Rewards => quote! {Rewards},
};
quote! {
Sysvar<#account>
}
}
};
quote! {
#name: #ty
}
}
| 36.894472 | 112 | 0.488287 |
26da64686ecd3cda10c4834f0086b000b2956c80 | 2,168 | use crate::components::score::{Position, Score};
use crate::components::Drawable;
use crate::data::WorldData;
use crate::graphics::data;
use crate::graphics::data::NumberTile;
use crate::rect::Rect;
use specs::{Builder, World, WorldExt};
const TILE_TO_WORLD_DIVIDER: u32 = 8;
const DISTANCE_FROM_WORLD_EDGE: i32 = 16;
const NUMBER_SPACING: u32 = 2;
pub struct ScoreEntity;
impl ScoreEntity {
pub fn create_all_tiles(world: &mut World, world_data: &WorldData) {
ScoreEntity::create_tile(world, world_data, Position::One);
ScoreEntity::create_tile(world, world_data, Position::Ten);
ScoreEntity::create_tile(world, world_data, Position::Hundred);
ScoreEntity::create_tile(world, world_data, Position::Thousand);
ScoreEntity::create_tile(world, world_data, Position::TenThousand);
ScoreEntity::create_tile(world, world_data, Position::HundredThousand);
}
fn create_tile(world: &mut World, world_data: &WorldData, position: Position) {
let tile_data = data::build_tile_data(data::Tile::Number {
tile: NumberTile::Zero,
});
let width_in_world = tile_data.bounds_in_tile_sheet.width() / TILE_TO_WORLD_DIVIDER;
let height_in_world = tile_data.bounds_in_tile_sheet.height() / TILE_TO_WORLD_DIVIDER;
let distance_from_world_right = DISTANCE_FROM_WORLD_EDGE
+ match position {
Position::One => 1,
Position::Ten => 2,
Position::Hundred => 3,
Position::Thousand => 4,
Position::TenThousand => 5,
Position::HundredThousand => 6,
} * (width_in_world + NUMBER_SPACING) as i32;
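        // For example, with a 16x16 source tile (an assumption for
        // illustration), width_in_world = 16 / 8 = 2, so Position::One sits
        // 16 + 1 * (2 + 2) = 20 world units from the right edge, Position::Ten
        // at 24, and so on, one slot per decimal digit.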
let drawable = Drawable {
tile_data,
world_bounds: Rect::new(
world_data.bounds().right() - distance_from_world_right,
world_data.bounds().top() + DISTANCE_FROM_WORLD_EDGE,
width_in_world,
height_in_world,
),
};
world
.create_entity()
.with(Score { position })
.with(drawable)
.build();
}
}
| 36.745763 | 94 | 0.625 |
1ab2eaaa4aafb5926d2d27c9c4279260dc1717c7 | 4,294 | /**
* Copyright 2021 Rigetti Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
use std::fmt;
#[derive(Debug, Clone)]
pub enum TranslationError {
InvalidLLVMByteCode,
NoEntryPoint,
UndefinedEntryPoint(String),
UnsupportedFunctionCall(String),
#[allow(dead_code)]
UnsupportedLLVMInstruction(String),
#[allow(dead_code)]
UnsupportedLLVMTerminator(String),
UnexpectedVariableType(String, String),
MissingDestinationForCall,
MissingOperandName,
MissingFunctionDefinition(String),
ExpectedLocalOperandForCall,
ExpectedConstantOperand,
CannotResolveLocalVariableName(String),
CannotResolveLocalVariableValue(String),
UnsupportedParameterType(String, String),
UnsupportedFloatType(String),
UnexpectedConstantType(String, String),
UnexpectedOperandType(String, String),
NonIntegerIndex,
UnsupportedAllocationType(String, String),
}
impl fmt::Display for TranslationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use TranslationError::*;
match self {
UndefinedEntryPoint(entrypoint) => write!(
f,
"Could not find entry point named '{}' in module",
entrypoint
),
NoEntryPoint => {
write!(f, "Could not find entry point in module")
}
UnsupportedFunctionCall(s) => {
write!(f, "Unsupported function call '{}'", s)
}
UnexpectedVariableType(name, expected) => {
write!(f, "Expected type {} for variable {}", name, expected)
}
MissingDestinationForCall => {
write!(f, "Missing destination for function call")
}
ExpectedLocalOperandForCall => {
write!(f, "Expected a local operand for function call")
}
MissingOperandName => write!(f, "Missing operand name"),
UnsupportedLLVMInstruction(instr) => {
write!(f, "Unsupported LLVM instruction '{}'", instr)
}
UnsupportedLLVMTerminator(term) => {
write!(f, "Unsupported LLVM terminator '{}'", term)
}
MissingFunctionDefinition(s) => {
write!(f, "Missing definition for function '{}'", s)
}
ExpectedConstantOperand => write!(f, "Expected a constant operand"),
InvalidLLVMByteCode => write!(f, "Could not parse LLVM BC file"),
CannotResolveLocalVariableName(name) => {
write!(f, "Could not resolve local variable {}", name)
}
CannotResolveLocalVariableValue(name) => {
write!(f, "Could not resolve local variable {} to a value", name)
}
UnsupportedParameterType(name, actual) => {
write!(f, "Parameter {} has unsupported type {}", name, actual)
}
UnsupportedFloatType(actual) => {
write!(f, "Cannot convert the LLVM float {} to f64", actual)
}
UnexpectedConstantType(expected, actual) => {
write!(f, "Expected {} for constant, got {}", expected, actual)
}
UnexpectedOperandType(expected, actual) => {
write!(f, "Expected operand of type {}, got {}", expected, actual)
}
NonIntegerIndex => write!(f, "Cannot index an array with a non-integer value"),
UnsupportedAllocationType(expected, actual) => {
write!(
f,
"Expected an allocation for type {}, got {}",
expected, actual
)
}
}
}
}
| 39.394495 | 91 | 0.583372 |
e801aa80eef4c9197ae87725d72deeeb55f254da | 1,377 | // This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shared logic between on-chain and off-chain components used for slashing using an off-chain
//! worker.
use super::SessionIndex;
use codec::Encode;
use sp_std::prelude::*;
pub(super) const PREFIX: &[u8] = b"session_historical";
pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune";
/// Derive the key used to store the list of validators
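///
/// The layout is `prefix ++ b"/" ++ SCALE(session_index)`; `SessionIndex` is a
/// `u32`, which SCALE encodes as four little-endian bytes, so e.g.
/// `derive_key(PREFIX, 5)` is `b"session_historical/"` followed by
/// `[5, 0, 0, 0]`.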
pub(super) fn derive_key<P: AsRef<[u8]>>(prefix: P, session_index: SessionIndex) -> Vec<u8> {
let prefix: &[u8] = prefix.as_ref();
session_index.using_encoded(|encoded_session_index| {
prefix
.into_iter()
.chain(b"/".into_iter())
.chain(encoded_session_index.into_iter())
.copied()
.collect::<Vec<u8>>()
})
}
| 34.425 | 95 | 0.724764 |
f7506829424bfc252f51d695cb9a76340ffcc280 | 7,098 | use std::iter::Peekable;
use std::str::Chars;
use token::Token;
pub struct LexingError {
line: usize,
row: usize,
msg: String,
}
impl LexingError {
fn new(line: usize, row: usize, msg: String) -> LexingError {
        LexingError { line, row, msg }
}
pub fn to_string(self) -> String {
format!("Lexing Error: line {}, row {}, {}", self.line, self.row, self.msg)
}
}
fn symbol_to_token(ch: char) -> Token {
match ch {
'+' => Token::Plus,
'<' => Token::Lt,
'(' => Token::LParen,
')' => Token::RParen,
'[' => Token::LBracket,
']' => Token::RBracket,
'{' => Token::LBrace,
'}' => Token::RBrace,
':' => Token::Colon,
',' => Token::Comma,
'.' => Token::Dot,
_ => panic!("Invalid symbol"),
}
}
fn ident_to_token(s: String) -> Token {
match &s[..] {
"None" => Token::None,
"True" => Token::True,
"False" => Token::False,
"if" => Token::If,
"else" => Token::Else,
"while" => Token::While,
"for" => Token::For,
"in" => Token::In,
"break" => Token::Break,
"continue" => Token::Continue,
"try" => Token::Try,
"except" => Token::Except,
"raise" => Token::Raise,
"def" => Token::Def,
"return" => Token::Return,
"assert" => Token::Assert,
"class" => Token::Class,
_ => Token::Ident(s),
}
}
fn is_number(ch: char) -> bool {
match ch {
'0' ... '9' => true,
_ => false
}
}
fn is_alphabet(ch: char) -> bool {
match ch {
'_' | 'a' ... 'z' | 'A' ... 'Z' => true,
_ => false
}
}
fn is_alphanumeric(ch: char) -> bool {
is_number(ch) || is_alphabet(ch)
}
fn is_whitespace(ch: char) -> bool {
ch == ' '
}
fn is_not_quote(ch: char) -> bool {
ch != '\''
}
fn is_not_dquote(ch: char) -> bool {
ch != '"'
}
struct Lexer<'a> {
it: Peekable<Chars<'a>>,
line: usize,
row: usize,
stack: Vec<usize>,
is_line_head: bool,
tokens: Vec<Token>
}
impl <'a>Lexer<'a> {
fn new(s: &'a String) -> Lexer<'a> {
Lexer { it: s.chars().peekable(), line: 1, row: 1, stack: vec![0],
is_line_head: true, tokens: vec![] }
}
fn next(&mut self) -> Option<char> {
match self.it.next() {
Some('\n') => {
self.line += 1;
self.row = 0;
Some('\n')
},
Some(c) => {
self.row += 1;
Some(c)
},
None => None,
}
}
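    // Compares a new line's indent level against the indent stack: a deeper
    // level pushes an Indent token, a shallower one pops Dedent tokens until a
    // matching level is found, and landing between recorded levels is an
    // indentation error.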
fn calc_indent(&mut self, indent_level: usize) -> Result<(), LexingError> {
let mut last_indent_level = *(self.stack.last().unwrap());
if indent_level > last_indent_level {
self.stack.push(indent_level);
self.tokens.push(Token::Indent);
} else if indent_level < last_indent_level {
loop {
self.stack.pop();
self.tokens.push(Token::Dedent);
last_indent_level = *(self.stack.last().unwrap());
if indent_level == last_indent_level {
break;
} else if indent_level > last_indent_level {
return Err(self.error("Invalid indentation".to_string()))
}
}
};
Ok(())
}
fn consume(&mut self, c1: char) -> Option<char> {
match self.next() {
Some(c2) if c1 == c2 => {
Some(c1)
},
_ => None,
}
}
fn consume_while<X>(&mut self, f: X) -> Vec<char>
where X: Fn(char) -> bool {
let mut v: Vec<char> = vec![];
while let Some(&ch) = self.it.peek() {
if f(ch) {
self.next(); v.push(ch)
} else {
break;
}
}
v
}
fn error(&self, s: String) -> LexingError {
LexingError::new(self.line, self.row, s)
}
}
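/// Tokenizes Python-like source, tracking indentation. For example,
/// `tokenize("if True:\n    x\n".to_string())` yields
/// `[If, True, Colon, NewLine, Indent, Ident("x"), NewLine, Dedent, EOF]`.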
pub fn tokenize(s: String) -> Result<Vec<Token>, LexingError> {
let mut lexer = Lexer::new(&s);
loop {
// consume blank lines
if lexer.is_line_head {
let indent_level = lexer.consume_while(is_whitespace).len();
match lexer.it.peek() {
Some('\n') => {
lexer.next();
continue
},
Some(_) => {
                    lexer.calc_indent(indent_level)?;
lexer.is_line_head = false;
},
_ => break,
}
};
        let ch = match lexer.it.peek() {
            Some(&ch_) => ch_,
            None => break,
        };
match ch {
'0' ... '9' => {
let num: String = lexer.consume_while(is_number).into_iter().collect();
lexer.tokens.push(Token::Int(num.parse::<i32>().unwrap()));
},
'\'' => {
lexer.next();
let s: String = lexer.consume_while(is_not_quote).into_iter().collect();
lexer.tokens.push(Token::Str(s));
                lexer.consume('\'').ok_or(lexer.error("\' expected".to_string()))?;
},
'"' => {
lexer.next();
let s: String = lexer.consume_while(is_not_dquote).into_iter().collect();
lexer.tokens.push(Token::Str(s));
lexer.consume('"').ok_or(lexer.error("\" expected".to_string()))?;
},
'+' | '<' | '(' | ')' | '[' | ']' | '{' | '}' | ':' | ',' | '.' => {
let nch = lexer.next().unwrap();
lexer.tokens.push(symbol_to_token(nch))
},
            '=' => {
                lexer.next();
                // Distinguish `==` from `=`; peek may be None at end of input.
                if lexer.it.peek() == Some(&'=') {
                    lexer.next();
                    lexer.tokens.push(Token::EqEq)
                } else {
                    lexer.tokens.push(Token::Eq)
                }
            },
'\n' => {
lexer.next();
lexer.tokens.push(Token::NewLine);
lexer.is_line_head = true;
}
ch if is_alphabet(ch) => {
let nch = lexer.next().unwrap();
let mut id_vec = lexer.consume_while(is_alphanumeric);
id_vec.insert(0, nch);
lexer.tokens.push(ident_to_token(id_vec.into_iter().collect()));
},
ch if is_whitespace(ch) => {
lexer.consume_while(is_whitespace);
}
_ => return Err(lexer.error(format!("Invalid character {} used", ch)))
}
};
loop {
match lexer.stack.pop() {
Some(i) if i != 0 => lexer.tokens.push(Token::Dedent),
_ => break,
}
}
lexer.tokens.push(Token::EOF);
Ok(lexer.tokens)
}
pub fn print_tokens(tokens: &Vec<Token>) {
for t in tokens {
println!("{:?}", t);
}
}
| 27.726563 | 89 | 0.438856 |
61cbda9b318f176c8aacc37cdafd0b0ccdd7c404 | 8,527 | //! This is a lightly modified version of the `path-clean` crate, which diverges a bit from the original
//! semantics/behavior.
//!
//! `path-clean` is a modification of a Rust port of the the `cleanname` procedure from the Plan 9 C library,
//! and is similar to [`path.Clean`](https://golang.org/pkg/path/#Clean) from the Go standard library. However,
//! unlike both of these functions, this module will not remove `..` elements which begin a path.
//!
//! It works as follows:
//!
//! 1. Reduce multiple slashes to a single slash.
//! 2. Eliminate `.` path name elements (the current directory).
//! 3. Eliminate `..` path name elements (the parent directory) and the non-`.` non-`..`, element that precedes them.
//! 4. Leave intact `..` elements that begin a path.
//!
//! If the result of this process is an empty string, return the string `"."`, representing the current directory.
//!
//! It performs this transform lexically, without touching the filesystem. Therefore it doesn't do
//! any symlink resolution or absolute path resolution. For more information you can see ["Getting Dot-Dot
//! Right"](https://9p.io/sys/doc/lexnames.html).
//!
//! For convenience, the [`PathClean`] trait is exposed and comes implemented for [`std::path::PathBuf`].
//!
//! ```rust
//! # use std::path::{Path, PathBuf};
//! # use sludge::path_clean::{clean, PathClean};
//! assert_eq!(clean("hello/world/.."), "hello");
//! assert_eq!(
//! Path::new("/test/../path/").clean(),
//! PathBuf::from("/path")
//! );
//! ```
/*
* Copyright (c) 2018 Dan Reeves
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use std::path::{Path, PathBuf};
/// The Clean trait implements a `clean` method. It's recommended you use the provided [`clean`]
/// function.
pub trait PathClean<T> {
fn clean(&self) -> T;
}
/// PathClean implemented for PathBuf
impl PathClean<PathBuf> for Path {
fn clean(&self) -> PathBuf {
PathBuf::from(clean(self.to_str().unwrap_or("")))
}
}
pub fn clean(path: &str) -> String {
let out = clean_internal(path.as_bytes());
// The code only matches/modifies ascii tokens and leaves the rest of
// the bytes as they are, so if the input string is valid utf8 the result
// will also be valid utf8.
unsafe { String::from_utf8_unchecked(out) }
}
/// The core implementation. It performs the following, lexically:
/// 1. Reduce multiple slashes to a single slash.
/// 2. Eliminate `.` path name elements (the current directory).
/// 3. Eliminate `..` path name elements (the parent directory) and the non-`.` non-`..`, element that precedes them.
/// 4. Leave intact `..` elements that begin a path.
///
/// If the result of this process is an empty string, return the string `"."`, representing the current directory.
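///
/// For example, `clean_internal(b"a//b/./../c")` yields `b"a/c"`: the double
/// slash collapses, the `.` element is dropped, and the `..` removes the
/// preceding `b`.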
fn clean_internal(path: &[u8]) -> Vec<u8> {
static DOT: u8 = b'.';
static SEP: u8 = b'/';
fn is_sep(b: u8) -> bool {
b == b'/' || b == b'\\'
}
if path.is_empty() {
return vec![DOT];
}
let rooted = is_sep(path[0]);
let n = path.len();
// Invariants:
// - reading from path; r is index of next byte to process.
// - dotdot is index in out where .. must stop, either because it is the
// leading slash or it is a leading ../../.. prefix.
//
    // The Go code this function is based on handles already-clean paths without
// an allocation, but I haven't done that here because I think it
// complicates the return signature too much.
let mut out: Vec<u8> = Vec::with_capacity(n);
let mut r = 0;
let mut dotdot = 0;
if rooted {
out.push(SEP);
r = 1;
dotdot = 1;
}
while r < n {
if is_sep(path[r]) || path[r] == DOT && (r + 1 == n || is_sep(path[r + 1])) {
// empty path element || . element: skip
r += 1;
} else if path[r] == DOT && path[r + 1] == DOT && (r + 2 == n || is_sep(path[r + 2])) {
// .. element: remove to last separator
r += 2;
if out.len() > dotdot {
// can backtrack, truncate to last separator
let mut w = out.len() - 1;
while w > dotdot && !is_sep(out[w]) {
w -= 1;
}
out.truncate(w);
} else {
// cannot backtrack, so append .. element
if !out.is_empty() && !(rooted && out.len() == 1) {
out.push(SEP);
}
out.push(DOT);
out.push(DOT);
dotdot = out.len();
}
} else {
// real path element
// add slash if needed
if rooted && out.len() != 1 || !rooted && !out.is_empty() {
out.push(SEP);
}
while r < n && !is_sep(path[r]) {
out.push(path[r]);
r += 1;
}
}
}
// Turn empty string into "."
if out.is_empty() {
out.push(DOT);
}
out
}
#[cfg(test)]
mod tests {
use super::{clean, PathClean};
use std::path::PathBuf;
#[test]
fn test_empty_path_is_current_dir() {
assert_eq!(clean(""), ".");
}
#[test]
fn test_clean_paths_dont_change() {
let tests = vec![(".", "."), ("..", ".."), ("/", "/"), ("\\", "/")];
for test in tests {
assert_eq!(clean(test.0), test.1);
}
}
#[test]
fn test_replace_multiple_slashes() {
let tests = vec![
("/", "/"),
("//", "/"),
("/\\/", "/"),
(".//", "."),
("//..", "/.."),
("..//", ".."),
("/..//", "/.."),
("/.//./", "/"),
("././/./", "."),
("path//to///thing", "path/to/thing"),
("/path//to///thing", "/path/to/thing"),
];
for test in tests {
assert_eq!(clean(test.0), test.1, "original: {}", test.0);
}
}
#[test]
fn test_eliminate_current_dir() {
let tests = vec![
("./", "."),
("/./", "/"),
("./test", "test"),
("./test/./path", "test/path"),
("/test/./path/", "/test/path"),
("test/path/.", "test/path"),
];
for test in tests {
assert_eq!(clean(test.0), test.1, "original: {}", test.0);
}
}
#[test]
fn test_eliminate_parent_dir() {
let tests = vec![
("/..", "/.."),
("/../test", "/../test"),
("test/..", "."),
("test/path/..", "test"),
("test/../path", "path"),
("/test/../path", "/path"),
("test/path/../../", "."),
("test/path/../../..", ".."),
("/test/path/../../..", "/.."),
("/test/path/../../../..", "/../.."),
("test/path/../../../..", "../.."),
("test/path/../../another/path", "another/path"),
("test/path/../../another/path/..", "another"),
("../test", "../test"),
("../test/", "../test"),
("../test/path", "../test/path"),
("../test/..", ".."),
];
for test in tests {
assert_eq!(clean(test.0), test.1, "original: {}", test.0);
}
}
#[test]
fn test_pathbuf_trait() {
assert_eq!(
PathBuf::from("/test/../path/").clean(),
PathBuf::from("/path")
);
}
}
| 33.570866 | 117 | 0.524217 |
0a9339c01b548725f65c612eedf466bf486e0289 | 31,568 | // MakAir Telemetry
//
// Copyright: 2020, Makers For Life
// License: Public Domain License
#![allow(clippy::upper_case_acronyms)]
use std::cmp::{Ord, Ordering, PartialOrd};
use std::convert::TryFrom;
use std::io;
use thiserror::Error;
use crate::control::ControlSetting;
use crate::locale::Locale;
/// Variants of the MakAir firmware
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum Mode {
/// Production mode
Production = 1,
/// (obsolete) Qualification mode
Qualification = 2,
/// (obsolete) Integration test mode
IntegrationTest = 3,
}
/// Phases of the respiratory cycle
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum Phase {
/// Inhalation
Inhalation,
/// Exhalation
Exhalation,
}
/// [obsolete in protocol v2] Sub-phases of the respiratory cycle
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum SubPhase {
/// Inspiration
Inspiration,
/// HoldInspiration
HoldInspiration,
/// Exhale
Exhale,
}
/// Supported alarm priorities
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum AlarmPriority {
/// High
High,
/// Medium
Medium,
/// Low
Low,
}
impl PartialOrd for AlarmPriority {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for AlarmPriority {
fn cmp(&self, other: &Self) -> Ordering {
let priority_to_int = |priority: &Self| match priority {
Self::High => 3,
Self::Medium => 2,
Self::Low => 1,
};
priority_to_int(self).cmp(&priority_to_int(other))
}
}
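// Alarm codes carry their priority in the tens digit: codes 10-19 and 40-49
// map to High, 20-29 to Medium and 30-39 to Low; anything else is rejected.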
impl TryFrom<u8> for AlarmPriority {
type Error = io::Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
10..=19 | 40..=49 => Ok(Self::High),
20..=29 => Ok(Self::Medium),
30..=39 => Ok(Self::Low),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid priority {}", value),
)),
}
}
}
/// Supported ventilation modes
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
#[allow(non_camel_case_types)]
pub enum VentilationMode {
/// PC-CMV
PC_CMV = 1,
/// PC-AC (default)
PC_AC = 2,
/// VC-CMV
VC_CMV = 3,
/// PC-VSAI
PC_VSAI = 4,
/// VC-AC
VC_AC = 5,
}
/// Ventilation mode class
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VentilationModeClass {
/// PC
Pressure,
/// VC
Volume,
}
/// Ventilation mode kind
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VentilationModeKind {
/// CMV
Cmv,
/// AC
Ac,
/// VSAI
Vsai,
}
impl TryFrom<u8> for VentilationMode {
type Error = io::Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
1 => Ok(Self::PC_CMV),
2 => Ok(Self::PC_AC),
3 => Ok(Self::VC_CMV),
4 => Ok(Self::PC_VSAI),
5 => Ok(Self::VC_AC),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid ventilation mode {}", value),
)),
}
}
}
impl Default for VentilationMode {
fn default() -> Self {
Self::PC_AC
}
}
impl From<&VentilationMode> for u8 {
fn from(mode: &VentilationMode) -> u8 {
*mode as u8
}
}
impl VentilationMode {
/// Get the class of the ventilation mode
pub fn class(&self) -> VentilationModeClass {
match self {
Self::PC_CMV | Self::PC_AC | Self::PC_VSAI => VentilationModeClass::Pressure,
Self::VC_CMV | Self::VC_AC => VentilationModeClass::Volume,
}
}
/// Get the kind of the ventilation mode
pub fn kind(&self) -> VentilationModeKind {
match self {
Self::PC_CMV | Self::VC_CMV => VentilationModeKind::Cmv,
Self::PC_AC | Self::VC_AC => VentilationModeKind::Ac,
Self::PC_VSAI => VentilationModeKind::Vsai,
}
}
}
/// Details of fatal errors
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum FatalErrorDetails {
/// MCU was restarted by watchdog
WatchdogRestart,
/// Calibration failed
CalibrationError {
/// Measured pressure offset in mmH2O
pressure_offset: i16,
        /// Minimum pressure measured during calibration in mmH2O
min_pressure: i16,
        /// Maximum pressure measured during calibration in mmH2O
max_pressure: i16,
/// Air flow measured at starting in cL/min (SLM * 100)
flow_at_starting: Option<i16>,
/// Air flow measured with blower ON in cL/min (SLM * 100)
flow_with_blower_on: Option<i16>,
},
/// Battery is too discharged
BatteryDeeplyDischarged {
/// Battery level in centivolts
battery_level: u16,
},
/// Could not read mass flow meter
MassFlowMeterError,
/// Read an inconsistent pressure
InconsistentPressure {
/// Measured pressure in mmH2O
pressure: u16,
},
}
/// Step of the end of line test
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
#[allow(non_camel_case_types, missing_docs)]
pub enum EolTestStep {
START,
SUPPLY_TO_EXPANDER_NOT_CONNECTED,
CHECK_FAN,
TEST_BAT_DEAD,
BATTERY_DEEP_DISCHARGE,
DISCONNECT_MAINS,
CONNECT_MAINS,
CHECK_BUZZER,
CHECK_ALL_BUTTONS,
CHECK_UI_SCREEN,
PLUG_AIR_TEST_SYTEM,
REACH_MAX_PRESSURE,
MAX_PRESSURE_REACHED_OK,
MAX_PRESSURE_NOT_REACHED,
START_LEAK_MESURE,
LEAK_IS_TOO_HIGH,
REACH_NULL_PRESSURE,
MIN_PRESSURE_NOT_REACHED,
USER_CONFIRMATION_BEFORE_O2_TEST,
START_O2_TEST,
O2_PRESSURE_NOT_REACH,
WAIT_USER_BEFORE_LONG_RUN,
START_LONG_RUN_BLOWER,
PRESSURE_NOT_STABLE,
FLOW_NOT_STABLE,
END_SUCCESS,
DISPLAY_PRESSURE,
DISPLAY_FLOW,
}
impl TryFrom<u8> for EolTestStep {
type Error = io::Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Self::START),
1 => Ok(Self::SUPPLY_TO_EXPANDER_NOT_CONNECTED),
2 => Ok(Self::CHECK_FAN),
3 => Ok(Self::TEST_BAT_DEAD),
4 => Ok(Self::BATTERY_DEEP_DISCHARGE),
5 => Ok(Self::DISCONNECT_MAINS),
6 => Ok(Self::CONNECT_MAINS),
7 => Ok(Self::CHECK_BUZZER),
8 => Ok(Self::CHECK_ALL_BUTTONS),
9 => Ok(Self::CHECK_UI_SCREEN),
10 => Ok(Self::PLUG_AIR_TEST_SYTEM),
11 => Ok(Self::REACH_MAX_PRESSURE),
12 => Ok(Self::MAX_PRESSURE_REACHED_OK),
13 => Ok(Self::MAX_PRESSURE_NOT_REACHED),
14 => Ok(Self::START_LEAK_MESURE),
15 => Ok(Self::LEAK_IS_TOO_HIGH),
16 => Ok(Self::REACH_NULL_PRESSURE),
17 => Ok(Self::MIN_PRESSURE_NOT_REACHED),
18 => Ok(Self::USER_CONFIRMATION_BEFORE_O2_TEST),
19 => Ok(Self::START_O2_TEST),
20 => Ok(Self::O2_PRESSURE_NOT_REACH),
21 => Ok(Self::WAIT_USER_BEFORE_LONG_RUN),
22 => Ok(Self::START_LONG_RUN_BLOWER),
23 => Ok(Self::PRESSURE_NOT_STABLE),
24 => Ok(Self::FLOW_NOT_STABLE),
25 => Ok(Self::END_SUCCESS),
26 => Ok(Self::DISPLAY_PRESSURE),
27 => Ok(Self::DISPLAY_FLOW),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid EOL test step {}", value),
)),
}
}
}
/// Content of end of line test snapshots
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum EolTestSnapshotContent {
/// Test is in progress
InProgress(String),
/// There was an error during test
Error(String),
/// End of line test succeeded
Success(String),
}
/// Patient gender
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum PatientGender {
/// Male
Male = 0,
/// Female
Female = 1,
}
impl TryFrom<u8> for PatientGender {
type Error = io::Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Self::Male),
1 => Ok(Self::Female),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid patient gender {}", value),
)),
}
}
}
impl Default for PatientGender {
fn default() -> Self {
Self::Male
}
}
impl From<&PatientGender> for u8 {
fn from(gender: &PatientGender) -> u8 {
*gender as u8
}
}
/// A telemetry message that is sent once every time the MCU boots
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct BootMessage {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Firmware variant currently flashed
pub mode: Mode,
/// The number "128"
///
    /// This is only used to make sure that the serial port was correctly opened and that there is no endianness problem.
pub value128: u8,
}
/// A telemetry message that is sent every 100 ms when the MCU is in "stop" mode
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct StoppedMessage {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// [protocol v2] Requested peak command in cmH2O
pub peak_command: Option<u8>,
/// [protocol v2] Requested plateau command in cmH2O
pub plateau_command: Option<u8>,
/// [protocol v2] Requested PEEP command in cmH2O
pub peep_command: Option<u8>,
/// [protocol v2] Requested number of cycles per minute
pub cpm_command: Option<u8>,
/// [protocol v2] Expiration term in the "Inspiration/Expiration" ratio given that Inspiration = 10
pub expiratory_term: Option<u8>,
/// [protocol v2] State of the trigger
pub trigger_enabled: Option<bool>,
/// [protocol v2] Trigger offset in mmH2O
pub trigger_offset: Option<u8>,
/// [protocol v2] State of the alarm snooze
pub alarm_snoozed: Option<bool>,
/// [protocol v2] CPU load in percent
pub cpu_load: Option<u8>,
/// Ventilation mode
pub ventilation_mode: VentilationMode,
/// [protocol v2] Inspiratory trigger flow in percent
pub inspiratory_trigger_flow: Option<u8>,
/// [protocol v2] Expiratory trigger flow in percent
pub expiratory_trigger_flow: Option<u8>,
/// [protocol v2] Minimum duration of inhalation in ms
pub ti_min: Option<u16>,
/// [protocol v2] Maximum duration of inhalation in ms
pub ti_max: Option<u16>,
/// [protocol v2] Threshold for low inspiratory minute volume alarm in L/min
pub low_inspiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high inspiratory minute volume alarm in L/min
pub high_inspiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for low expiratory minute volume alarm in L/min
pub low_expiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high expiratory minute volume alarm in L/min
pub high_expiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for low respiratory rate alarm in cycle per minute
pub low_respiratory_rate_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high respiratory rate alarm in cycle per minute
pub high_respiratory_rate_alarm_threshold: Option<u8>,
/// [protocol v2] Target tidal volume in mL
pub target_tidal_volume: Option<u16>,
/// [protocol v2] Threshold for low tidal volume in mL
pub low_tidal_volume_alarm_threshold: Option<u16>,
/// [protocol v2] Threshold for high tidal volume in mL
pub high_tidal_volume_alarm_threshold: Option<u16>,
/// [protocol v2] Duration in ms of closing both valves to effectively measure plateau pressure in volume control modes
pub plateau_duration: Option<u16>,
/// [protocol v2] Threshold for leak alarm in cL/min
pub leak_alarm_threshold: Option<u16>,
/// [protocol v2] Target flow during inspiration in L/min
pub target_inspiratory_flow: Option<u8>,
/// [protocol v2] Requested duration of inspiration in ms
pub inspiratory_duration_command: Option<u16>,
/// [protocol v2] Measured battery level value in centivolts (precise value)
pub battery_level: Option<u16>,
/// [protocol v2] Codes of the alarms that are currently triggered
pub current_alarm_codes: Option<Vec<u8>>,
/// [protocol v2] Language of the system
pub locale: Option<Locale>,
/// [protocol v2] Patient's height in centimeters
pub patient_height: Option<u8>,
/// [protocol v2] Patient's gender
pub patient_gender: Option<PatientGender>,
/// [protocol v2] Threshold for peak pressure alarm in mmH2O
pub peak_pressure_alarm_threshold: Option<u16>,
}
/// A telemetry message that is sent every time the firmware does a control iteration (every 10 ms)
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct DataSnapshot {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Number of hundredth of seconds since the begining of the current breathing cycle
pub centile: u16,
/// Current pressure in mmH2O (can be negative)
///
/// _[protocol v2] Changed from u16 to i16 (values above i16::MAX will be assigned the value i16::MAX, but this should not happen)_
pub pressure: i16,
/// Current phase
pub phase: Phase,
/// [obsolete in protocol v2] Current sub-phase
pub subphase: Option<SubPhase>,
/// Current angle of the blower valve
pub blower_valve_position: u8,
/// Current angle of the patient valve
pub patient_valve_position: u8,
/// Current blower speed (no unit)
pub blower_rpm: u8,
/// Current battery level in volts (imprecise value)
pub battery_level: u8,
/// [protocol v2] Inspiratory flow in cL/min (SLM * 100)
pub inspiratory_flow: Option<i16>,
/// [protocol v2] Expiratory flow in cL/min (SLM * 100)
pub expiratory_flow: Option<i16>,
}
/// A telemetry message that is sent at the end of every respiratory cycle
#[derive(Debug, Default, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct MachineStateSnapshot {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Number of the current breathing cycle since MCU booted
pub cycle: u32,
/// Requested peak command in cmH2O
pub peak_command: u8,
/// Requested plateau command in cmH2O
pub plateau_command: u8,
/// Requested PEEP command in cmH2O
pub peep_command: u8,
/// Requested number of cycles per minute
pub cpm_command: u8,
/// Measured peak pressure in mmH2O
pub previous_peak_pressure: u16,
    /// Measured plateau pressure in mmH2O
pub previous_plateau_pressure: u16,
/// Measured PEEP in mmH2O
pub previous_peep_pressure: u16,
/// Codes of the alarms that are currently triggered
pub current_alarm_codes: Vec<u8>,
    /// Measured volume of the previous cycle in mL (sensor might not be enabled)
pub previous_volume: Option<u16>,
/// Expiration term in the "Inspiration/Expiration" ratio given that Inspiration = 10
pub expiratory_term: u8,
/// State of the trigger
pub trigger_enabled: bool,
/// Trigger offset in mmH2O
pub trigger_offset: u8,
/// [protocol v2] Measured number of cycles per minute
pub previous_cpm: Option<u8>,
/// [protocol v2] State of the alarm snooze
pub alarm_snoozed: Option<bool>,
/// [protocol v2] CPU load in percent
pub cpu_load: Option<u8>,
/// Ventilation mode
pub ventilation_mode: VentilationMode,
/// [protocol v2] Inspiratory trigger flow in percent
pub inspiratory_trigger_flow: Option<u8>,
/// [protocol v2] Expiratory trigger flow in percent
pub expiratory_trigger_flow: Option<u8>,
/// [protocol v2] Minimum duration of inhalation in ms
pub ti_min: Option<u16>,
/// [protocol v2] Maximum duration of inhalation in ms
pub ti_max: Option<u16>,
/// [protocol v2] Threshold for low inspiratory minute volume alarm in L/min
pub low_inspiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high inspiratory minute volume alarm in L/min
pub high_inspiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for low expiratory minute volume alarm in L/min
pub low_expiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high expiratory minute volume alarm in L/min
pub high_expiratory_minute_volume_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for low respiratory rate alarm in cycle per minute
pub low_respiratory_rate_alarm_threshold: Option<u8>,
/// [protocol v2] Threshold for high respiratory rate alarm in cycle per minute
pub high_respiratory_rate_alarm_threshold: Option<u8>,
/// [protocol v2] Target tidal volume in mL
pub target_tidal_volume: Option<u16>,
/// [protocol v2] Threshold for low tidal volume in mL
pub low_tidal_volume_alarm_threshold: Option<u16>,
/// [protocol v2] Threshold for high tidal volume in mL
pub high_tidal_volume_alarm_threshold: Option<u16>,
/// [protocol v2] Duration in ms of closing both valves to effectively measure plateau pressure in volume control modes
pub plateau_duration: Option<u16>,
/// [protocol v2] Threshold for leak alarm in cL/min
pub leak_alarm_threshold: Option<u16>,
/// [protocol v2] Target flow during inspiration in L/min
pub target_inspiratory_flow: Option<u8>,
/// [protocol v2] Requested duration of inspiration in ms
pub inspiratory_duration_command: Option<u16>,
/// [protocol v2] Measured duration of inspiration in ms
pub previous_inspiratory_duration: Option<u16>,
/// [protocol v2] Measured battery level value in centivolts (precise value)
pub battery_level: Option<u16>,
/// [protocol v2] Language of the system
pub locale: Option<Locale>,
/// [protocol v2] Patient's height in centimeters
pub patient_height: Option<u8>,
/// [protocol v2] Patient's gender
pub patient_gender: Option<PatientGender>,
/// [protocol v2] Threshold for peak pressure alarm in mmH2O
pub peak_pressure_alarm_threshold: Option<u16>,
}
/// A telemetry message that is sent every time an alarm is triggered or stopped
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct AlarmTrap {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
    /// Number of hundredths of a second since the beginning of the current breathing cycle
pub centile: u16,
/// Current pressure in mmH2O (can be negative)
///
/// _[protocol v2] Changed from u16 to i16 (values above i16::MAX will be assigned the value i16::MAX, but this should not happen)_
pub pressure: i16,
/// Current phase
pub phase: Phase,
/// [obsolete in protocol v2] Current sub-phase
pub subphase: Option<SubPhase>,
/// Number of the current breathing cycle since MCU booted
pub cycle: u32,
/// Code of the alarm
pub alarm_code: u8,
/// Priority level of the alarm
pub alarm_priority: AlarmPriority,
/// `true` if alarm was triggered, `false` if it was stopped
pub triggered: bool,
/// Expected value (unit depends on the alarm)
pub expected: u32,
/// Measured value (unit depends on the alarm)
pub measured: u32,
/// Number of cycle for which this alarm has been triggered
pub cycles_since_trigger: u32,
}
/// An ACK message that is sent every time a setting is changed on the MCU side
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct ControlAck {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Setting that was changed
pub setting: ControlSetting,
/// New value
pub value: u16,
}
/// [protocol v2] A message sent when a fatal error occurs
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct FatalError {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Details of the error
pub error: FatalErrorDetails,
}
/// [protocol v2] A message sent during end of line tests
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub struct EolTestSnapshot {
/// Version of the telemetry protocol
pub telemetry_version: u8,
/// Version of the MCU firmware
pub version: String,
/// Internal ID of the MCU
pub device_id: String,
/// Number of microseconds since the MCU booted
pub systick: u64,
/// Current step
pub current_step: EolTestStep,
/// Content of the snapshot
pub content: EolTestSnapshotContent,
}
/// Supported telemetry messages
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
#[cfg_attr(feature = "serde-messages", serde(tag = "message_type"))]
pub enum TelemetryMessage {
/// A telemetry message that is sent once every time the MCU boots
BootMessage(BootMessage),
/// A telemetry message that is sent every 100 ms when the MCU is in "stop" mode
StoppedMessage(StoppedMessage),
/// A telemetry message that is sent every time the firmware does a control iteration (every 10 ms)
DataSnapshot(DataSnapshot),
/// A telemetry message that is sent at the end of every respiratory cycle
MachineStateSnapshot(MachineStateSnapshot),
/// A telemetry message that is sent every time an alarm is triggered or stopped
AlarmTrap(AlarmTrap),
/// An ACK message that is sent every time a setting is changed using the control protocol
ControlAck(ControlAck),
/// [protocol v2] A message sent when a fatal error occurs
FatalError(FatalError),
/// [protocol v2] A message sent during end of line tests
EolTestSnapshot(EolTestSnapshot),
}
impl TelemetryMessage {
/// Version of the telemetry protocol
pub fn telemetry_version(&self) -> u8 {
let val = match self {
Self::BootMessage(BootMessage {
telemetry_version, ..
}) => telemetry_version,
Self::StoppedMessage(StoppedMessage {
telemetry_version, ..
}) => telemetry_version,
Self::DataSnapshot(DataSnapshot {
telemetry_version, ..
}) => telemetry_version,
Self::MachineStateSnapshot(MachineStateSnapshot {
telemetry_version, ..
}) => telemetry_version,
Self::AlarmTrap(AlarmTrap {
telemetry_version, ..
}) => telemetry_version,
Self::ControlAck(ControlAck {
telemetry_version, ..
}) => telemetry_version,
Self::FatalError(FatalError {
telemetry_version, ..
}) => telemetry_version,
Self::EolTestSnapshot(EolTestSnapshot {
telemetry_version, ..
}) => telemetry_version,
};
*val
}
/// Version of the MCU firmware
pub fn version(&self) -> String {
let val = match self {
Self::BootMessage(BootMessage { version, .. }) => version,
Self::StoppedMessage(StoppedMessage { version, .. }) => version,
Self::DataSnapshot(DataSnapshot { version, .. }) => version,
Self::MachineStateSnapshot(MachineStateSnapshot { version, .. }) => version,
Self::AlarmTrap(AlarmTrap { version, .. }) => version,
Self::ControlAck(ControlAck { version, .. }) => version,
Self::FatalError(FatalError { version, .. }) => version,
Self::EolTestSnapshot(EolTestSnapshot { version, .. }) => version,
};
val.clone()
}
/// Internal ID of the MCU
pub fn device_id(&self) -> String {
let val = match self {
Self::BootMessage(BootMessage { device_id, .. }) => device_id,
Self::StoppedMessage(StoppedMessage { device_id, .. }) => device_id,
Self::DataSnapshot(DataSnapshot { device_id, .. }) => device_id,
Self::MachineStateSnapshot(MachineStateSnapshot { device_id, .. }) => device_id,
Self::AlarmTrap(AlarmTrap { device_id, .. }) => device_id,
Self::ControlAck(ControlAck { device_id, .. }) => device_id,
Self::FatalError(FatalError { device_id, .. }) => device_id,
Self::EolTestSnapshot(EolTestSnapshot { device_id, .. }) => device_id,
};
val.clone()
}
/// Number of microseconds since the MCU booted
pub fn systick(&self) -> u64 {
let val = match self {
Self::BootMessage(BootMessage { systick, .. }) => systick,
Self::StoppedMessage(StoppedMessage { systick, .. }) => systick,
Self::DataSnapshot(DataSnapshot { systick, .. }) => systick,
Self::MachineStateSnapshot(MachineStateSnapshot { systick, .. }) => systick,
Self::AlarmTrap(AlarmTrap { systick, .. }) => systick,
Self::ControlAck(ControlAck { systick, .. }) => systick,
Self::FatalError(FatalError { systick, .. }) => systick,
Self::EolTestSnapshot(EolTestSnapshot { systick, .. }) => systick,
};
*val
}
}
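// A hedged sketch (not part of the original crate) showing how the shared
// accessors above allow uniform handling of any message variant, e.g. for logging:
//
//     fn log_message(msg: &TelemetryMessage) {
//         println!(
//             "protocol v{}, firmware {}, device {}, systick {} µs",
//             msg.telemetry_version(),
//             msg.version(),
//             msg.device_id(),
//             msg.systick(),
//         );
//     }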
/// Extension of Nom's `ErrorKind` to be able to represent CRC errors
#[derive(Debug, Clone, PartialEq)]
pub enum TelemetryErrorKind {
/// Standard Nom error
ParserError(nom::error::VerboseErrorKind),
/// CRC error
CrcError {
/// Expected CRC (included in the message)
expected: u32,
/// Computed CRC (from the actual message)
computed: u32,
},
/// Unsupported protocol (message header contains an unsupported protocol version)
UnsupportedProtocolVersion {
/// Maximum supported version of the telemetry protocol
maximum_supported: u8,
/// Found version of the telemetry protocol
found: u8,
},
}
/// Custom parser error type to leverage `TelemetryErrorKind`
#[derive(Debug, Clone, PartialEq)]
pub struct TelemetryError<I>(pub I, pub TelemetryErrorKind);
impl<I> nom::error::ParseError<I> for TelemetryError<I> {
fn from_error_kind(input: I, kind: nom::error::ErrorKind) -> Self {
TelemetryError(
input,
TelemetryErrorKind::ParserError(nom::error::VerboseErrorKind::Nom(kind)),
)
}
fn append(_: I, _: nom::error::ErrorKind, other: Self) -> Self {
other
}
}
impl<I, E> nom::error::FromExternalError<I, E> for TelemetryError<I> {
fn from_external_error(input: I, kind: nom::error::ErrorKind, _e: E) -> Self {
use nom::error::ParseError;
Self::from_error_kind(input, kind)
}
}
impl<I> From<nom::error::Error<I>> for TelemetryError<I> {
fn from(error: nom::error::Error<I>) -> Self {
TelemetryError(
error.input,
TelemetryErrorKind::ParserError(nom::error::VerboseErrorKind::Nom(error.code)),
)
}
}
/// Errors that need to be reported to the UI
#[derive(Debug, Clone, PartialEq, Eq, Error)]
#[cfg_attr(
feature = "serde-messages",
derive(serde::Serialize, serde::Deserialize)
)]
pub enum HighLevelError {
/// CRC error
#[error("invalid CRC: expected={expected} ≠ computed={computed}")]
CrcError {
/// Expected CRC (included in the message)
expected: u32,
/// Computed CRC (from the actual message)
computed: u32,
},
/// Unsupported protocol (message header contains an unsupported protocol version)
#[error("this message seems to use telemetry protocol version {found} whereas latest supported version is {maximum_supported}")]
UnsupportedProtocolVersion {
/// Maximum supported version of the telemetry protocol
maximum_supported: u8,
/// Found version of the telemetry protocol
found: u8,
},
}
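// A hedged sketch of how parse-level errors could be surfaced as UI-facing
// `HighLevelError`s (hypothetical mapping; field names come from the two enums above):
//
//     fn to_high_level(kind: &TelemetryErrorKind) -> Option<HighLevelError> {
//         match kind {
//             TelemetryErrorKind::CrcError { expected, computed } => {
//                 Some(HighLevelError::CrcError { expected: *expected, computed: *computed })
//             }
//             TelemetryErrorKind::UnsupportedProtocolVersion { maximum_supported, found } => {
//                 Some(HighLevelError::UnsupportedProtocolVersion {
//                     maximum_supported: *maximum_supported,
//                     found: *found,
//                 })
//             }
//             TelemetryErrorKind::ParserError(_) => None,
//         }
//     }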
#[cfg(test)]
mod tests {
use crate::structures::AlarmPriority;
use std::cmp::Ordering;
#[test]
fn order_alarm_priority() {
let high = AlarmPriority::High;
let medium = AlarmPriority::Medium;
let low = AlarmPriority::Low;
// equal
assert_eq!(high.cmp(&high), Ordering::Equal);
assert_eq!(medium.cmp(&medium), Ordering::Equal);
assert_eq!(low.cmp(&low), Ordering::Equal);
// lower
assert_eq!(medium.cmp(&high), Ordering::Less);
assert_eq!(low.cmp(&high), Ordering::Less);
assert_eq!(low.cmp(&medium), Ordering::Less);
// greater
assert_eq!(high.cmp(&medium), Ordering::Greater);
assert_eq!(high.cmp(&low), Ordering::Greater);
assert_eq!(medium.cmp(&low), Ordering::Greater);
}
}
| 34.275787 | 135 | 0.646699 |
ff11c2ba8e21a339daf0c5b8eadbf42f0e74016f | 11,107 | use std::collections::{BTreeMap, HashMap, HashSet};
use std::num::NonZeroU64;
use rand::prelude::*;
mod actor;
mod fov;
mod map;
use actor::ActorState;
use geometry::{Direction, Position};
pub mod geometry;
pub use actor::ActorType;
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
struct Entity(NonZeroU64);
// TODO: switch to new(1).unwrap() once that's const
const PLAYER: Entity = Entity(unsafe { NonZeroU64::new_unchecked(1) });
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum EntityType {
Actor(ActorType),
Corpse(ActorType),
}
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum Tile {
Wall,
Tree,
Ground,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Obstruction {
Full,
Partial,
None,
}
impl Tile {
pub fn obstruction(self) -> Obstruction {
match self {
Tile::Wall => Obstruction::Full,
Tile::Tree => Obstruction::Partial,
Tile::Ground => Obstruction::None,
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TileView {
Visible {
actor: Option<ActorType>,
object: Option<EntityType>,
tile: Tile,
},
Remembered {
object: Option<EntityType>,
tile: Tile,
},
Explorable,
Unknown,
}
impl TileView {
pub fn actor(&self) -> Option<ActorType> {
match self {
&TileView::Visible { actor, .. } => actor,
_ => None,
}
}
pub fn object(&self) -> Option<EntityType> {
match self {
&TileView::Visible { object, .. } => object,
&TileView::Remembered { object, .. } => object,
_ => None,
}
}
pub fn tile(&self) -> Option<Tile> {
match self {
&TileView::Visible { tile, .. } => Some(tile),
&TileView::Remembered { tile, .. } => Some(tile),
_ => None,
}
}
}
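// A hedged sketch of consuming a `TileView` when drawing a single map cell
// (the glyph mapping is hypothetical, not part of this crate):
//
//     let glyph = match view.tile() {
//         Some(Tile::Wall) => '#',
//         Some(Tile::Tree) => 'T',
//         Some(Tile::Ground) => '.',
//         None => ' ',
//     };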
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Action {
Wait,
Move(Direction),
Attack(Direction),
MoveAttack(Direction),
}
// Include info on what exactly went wrong in error?
// Per action type errors?
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum ActionError {
IllegalDiagonal,
Impassible,
Occupied,
InvalidActor,
NoValidTarget,
}
type ActionResult<Ok = ()> = Result<Ok, ActionError>;
pub struct Game {
tiles: HashMap<Position, Tile>,
types: HashMap<Entity, EntityType>,
states: BTreeMap<Entity, ActorState>,
// TODO: replace with some sort of indexed map thing
positions: HashMap<Entity, Position>,
actors: HashMap<Position, Entity>,
objects: HashMap<Position, Vec<Entity>>,
visible_ghosts: HashSet<Entity>,
rng: StdRng,
prev_entity: Entity,
view: HashMap<Position, TileView>,
}
impl Game {
pub fn new(seed: u64) -> Game {
Game::new_from_rng(StdRng::seed_from_u64(seed))
}
fn new_from_rng(rng: StdRng) -> Game {
let mut g = Game {
tiles: HashMap::new(),
types: HashMap::new(),
states: BTreeMap::new(),
positions: HashMap::new(),
actors: HashMap::new(),
objects: HashMap::new(),
visible_ghosts: HashSet::new(),
rng,
prev_entity: PLAYER,
view: HashMap::new(),
};
g.types.insert(PLAYER, EntityType::Actor(ActorType::Player));
map::generate_basin(&mut g);
// TODO: handle errors
let _ = g.set_actor_position(PLAYER, Position { x: 0, y: 0 });
fov::update_view(&mut g);
actor::notice_player(&mut g);
g
}
pub fn restart(&mut self) {
*self = Game::new_from_rng(self.rng.clone());
}
pub fn view(&self, pos: Position) -> TileView {
self.view.get(&pos).cloned().unwrap_or(TileView::Unknown)
}
pub fn player_position(&self) -> Option<Position> {
self.positions.get(&PLAYER).cloned()
}
pub fn take_player_action(&mut self, action: Action) -> ActionResult {
self.take_action(PLAYER, action)?;
fov::update_view(self);
actor::take_actions(self);
fov::update_view(self);
actor::notice_player(self);
Ok(())
}
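// A hedged sketch of the public turn loop (the caller and the rendering step
// are hypothetical):
//
//     let mut game = Game::new(42);
//     match game.take_player_action(Action::MoveAttack(Direction::North)) {
//         Ok(()) => { /* re-render from game.view(..) */ }
//         Err(ActionError::IllegalDiagonal) | Err(ActionError::Impassible) => { /* no turn consumed */ }
//         Err(_) => {}
//     }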
fn tile(&self, pos: Position) -> Tile {
self.tiles.get(&pos).cloned().unwrap_or(Tile::Wall)
}
fn take_action(&mut self, e: Entity, action: Action) -> ActionResult {
let actor_type = match self.types.get(&e) {
Some(&EntityType::Actor(a)) => a,
_ => { return Err(ActionError::InvalidActor); }
};
let pos = self.positions.get(&e).cloned().ok_or(ActionError::InvalidActor)?;
// TODO: at some point the various checks used could leak info, so should consume a turn
// (and update known map information) if you don't already know they're invalid
match action {
Action::Wait => {}
Action::Move(dir) => {
// TODO: ghost should maybe be able to move through walls or something
if actor_type == ActorType::Crab && !dir.is_orthogonal() {
return Err(ActionError::IllegalDiagonal);
}
if let Some((a, b)) = match dir {
Direction::NorthEast => Some((Direction::North, Direction::East)),
Direction::SouthEast => Some((Direction::South, Direction::East)),
Direction::SouthWest => Some((Direction::South, Direction::West)),
Direction::NorthWest => Some((Direction::North, Direction::West)),
_ => None,
} {
if self.tile(pos.step(a)).obstruction() == Obstruction::Full
&& self.tile(pos.step(b)).obstruction() == Obstruction::Full
{
return Err(ActionError::IllegalDiagonal);
}
}
self.set_actor_position(e, pos.step(dir))?;
}
Action::Attack(dir) => {
let target_pos = pos.step(dir);
if let Some(&target) = self.actors.get(&target_pos) {
if let Some(&EntityType::Actor(target_type)) = self.types.get(&target) {
// TODO: not being able to attack crabs diagonally could cause them to get
// stuck on terrain and be unkillable, but I do like the idea of making them
// a bit harder to kill to compensate for being less mobile
// I could do beetle style knockback when hitting them diagonally, but I'm
// not sure if that's what I really want
if (actor_type == ActorType::Crab || target_type == ActorType::Crab) && !dir.is_orthogonal() {
return Err(ActionError::IllegalDiagonal);
}
if actor_type == ActorType::Player || target_type == ActorType::Player {
// TODO: chain push multiple beetles?
if target_type == ActorType::Beetle {
match self.take_action(target, Action::Move(dir)) {
Ok(_) => {
self.states.insert(target, ActorState::Wait);
return Ok(());
}
Err(ActionError::Occupied) => {
if let Some(&victim) = self.actors.get(&target_pos.step(dir)) {
// TODO: use force attack action so we know what tried to kill the victim?
let _ = self.kill_actor(victim);
let _ = self.take_action(target, Action::Move(dir));
}
}
Err(_) => {}
}
}
self.kill_actor(target)?;
return Ok(());
}
}
}
return Err(ActionError::NoValidTarget);
}
Action::MoveAttack(dir) => {
let result = self.take_action(e, Action::Move(dir));
if result == Err(ActionError::Occupied) {
self.take_action(e, Action::Attack(dir))?;
} else {
result?;
}
}
};
Ok(())
}
fn kill_actor(&mut self, e: Entity) -> ActionResult {
// TODO: some type system level stuff to avoid having to revalidate this stuff
let actor_type = match self.types.get(&e) {
Some(&EntityType::Actor(a)) => a,
_ => { return Err(ActionError::InvalidActor); }
};
self.states.remove(&e);
self.types.insert(e, EntityType::Corpse(actor_type));
if let Some(&pos) = self.positions.get(&e) {
self.actors.remove(&pos);
if actor_type != ActorType::Ghost {
self.objects.entry(pos).or_insert_with(Vec::new).push(e);
}
if actor_type == ActorType::BigJelly {
for &dir in &geometry::ORTHOGONAL_DIRECTIONS {
let _ = self.spawn_actor(ActorType::LittleJelly, pos.step(dir));
}
}
}
return Ok(());
}
fn new_entity(&mut self, entity_type: EntityType) -> Entity {
if let Some(id) = self.prev_entity.0.get().checked_add(1).and_then(NonZeroU64::new) {
let new_entity = Entity(id);
self.prev_entity = new_entity;
self.types.insert(new_entity, entity_type);
new_entity
} else {
// TODO: not actually unreachable...
unreachable!();
}
}
fn set_actor_position(&mut self, e: Entity, pos: Position) -> ActionResult<Option<Position>> {
let new_tile = self.tile(pos);
if new_tile.obstruction() != Obstruction::None {
return Err(ActionError::Impassible);
}
if let Some(&other) = self.actors.get(&pos) {
if other == e {
return Ok(Some(pos));
} else {
return Err(ActionError::Occupied);
}
}
let old_pos = self.positions.insert(e, pos);
if let Some(old_pos) = old_pos {
self.actors.remove(&old_pos);
}
self.actors.insert(pos, e);
Ok(old_pos)
}
fn spawn_actor(&mut self, t: ActorType, pos: Position) -> ActionResult<Entity> {
let e = self.new_entity(EntityType::Actor(t));
// TODO: don't leak entity on invalid placement
self.set_actor_position(e, pos)?;
if t != ActorType::Player {
self.states.insert(e, ActorState::Wait);
}
Ok(e)
}
}
| 33.759878 | 118 | 0.516521 |
7640dd722169a63c8619e2f666f8eda387258d9d | 89 | pub mod production_module;
pub mod construct;
pub mod amount;
pub mod construct_position; | 22.25 | 27 | 0.831461 |
71b062289ccabf53da9cefd58d81859dfb7e3cb6 | 8,833 | use std::io::{Read, Write};
use std::fmt::{self, Display, Debug};
use std::mem::{self, MaybeUninit};
use std::os::raw::{c_void, c_uint, c_int};
use std::ptr;
use fdk_aac_sys as sys;
pub use sys::AACENC_InfoStruct as InfoStruct;
pub enum EncoderError {
Io(std::io::Error),
FdkAac(sys::AACENC_ERROR),
}
impl EncoderError {
fn message(&self) -> &'static str {
match self {
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INVALID_HANDLE) => "Handle passed to function call was invalid.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_MEMORY_ERROR) => "Memory allocation failed.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_UNSUPPORTED_PARAMETER) => "Parameter not available.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INVALID_CONFIG) => "Configuration not provided.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_ERROR) => "General initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_AAC_ERROR) => "AAC library initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_SBR_ERROR) => "SBR library initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_TP_ERROR) => "Transport library initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_META_ERROR) => "Meta data library initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_INIT_MPS_ERROR) => "MPS library initialization error.",
EncoderError::FdkAac(sys::AACENC_ERROR_AACENC_ENCODE_ERROR) => "The encoding process was interrupted by an unexpected error.",
EncoderError::FdkAac(_) => "Unknown error",
EncoderError::Io(_e) => "io error",
}
}
fn code(&self) -> u32 {
match self {
EncoderError::FdkAac(code) => *code,
EncoderError::Io(_e) => 0,
}
}
}
impl std::error::Error for EncoderError {
}
impl Debug for EncoderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "EncoderError {{ code: {:?}, message: {:?} }}", self.code(), self.message())
}
}
impl Display for EncoderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.message())
}
}
impl From<std::io::Error> for EncoderError {
fn from(err: std::io::Error) -> Self {
EncoderError::Io(err)
}
}
fn check(e: sys::AACENC_ERROR) -> Result<(), EncoderError> {
if e == sys::AACENC_ERROR_AACENC_OK {
Ok(())
} else {
Err(EncoderError::FdkAac(e))
}
}
struct EncoderHandle {
ptr: sys::HANDLE_AACENCODER,
}
impl EncoderHandle {
pub fn alloc(max_modules: usize, max_channels: usize) -> Result<Self, EncoderError> {
let mut ptr: sys::HANDLE_AACENCODER = ptr::null_mut();
check(unsafe {
sys::aacEncOpen(&mut ptr as *mut _, max_modules as c_uint, max_channels as c_uint)
})?;
Ok(EncoderHandle { ptr })
}
}
impl Drop for EncoderHandle {
fn drop(&mut self) {
unsafe { sys::aacEncClose(&mut self.ptr as *mut _); }
}
}
#[derive(Debug, Clone, Copy)]
pub enum BitRate {
Cbr(u32),
VbrVeryLow,
VbrLow,
VbrMedium,
VbrHigh,
VbrVeryHigh,
}
pub struct EncoderParams {
pub bit_rate: BitRate,
pub sample_rate: u32,
pub transport: Transport,
}
pub struct Encoder {
handle: EncoderHandle,
}
#[derive(Debug)]
pub enum Transport {
Adts,
Raw,
}
#[derive(Debug)]
pub struct EncodeInfo {
pub input_consumed: usize,
pub output_size: usize,
}
impl Encoder {
pub fn new(params: EncoderParams) -> Result<Self, EncoderError> {
let handle = EncoderHandle::alloc(0, 2 /* hardcode stereo */)?;
unsafe {
// hardcode MPEG-4 AAC Low Complexity for now:
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_AOT, 2))?;
let bitrate_mode = match params.bit_rate {
BitRate::Cbr(bitrate) => {
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_BITRATE, bitrate))?;
0
}
BitRate::VbrVeryLow => 1,
BitRate::VbrLow => 2,
BitRate::VbrMedium => 3,
BitRate::VbrHigh => 4,
BitRate::VbrVeryHigh => 5,
};
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_BITRATEMODE, bitrate_mode))?;
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_SAMPLERATE, params.sample_rate))?;
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_TRANSMUX, match params.transport {
Transport::Adts => 2,
Transport::Raw => 0,
}))?;
// hardcode SBR off for now
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_SBR_MODE, 0))?;
// hardcode stereo
check(sys::aacEncoder_SetParam(handle.ptr, sys::AACENC_PARAM_AACENC_CHANNELMODE, 2))?;
// call encode once with all null params according to docs
check(sys::aacEncEncode(handle.ptr, ptr::null(), ptr::null(), ptr::null(), ptr::null_mut()))?;
}
Ok(Encoder { handle })
}
pub fn info(&self) -> Result<InfoStruct, EncoderError> {
let mut info = MaybeUninit::uninit();
check(unsafe { sys::aacEncInfo(self.handle.ptr, info.as_mut_ptr()) })?;
Ok(unsafe { info.assume_init() })
}
pub fn encode<R: Read, W: Write>(&self, input: &mut R, output: &mut W) -> Result<EncodeInfo, EncoderError> {
let info = self.info()?;
let channels = 2; // hard-coded to stereo
let buffer_len = 2*channels*info.frameLength as usize;
let mut input_buffer = vec![0; buffer_len];
let mut output_buffer = vec![0; buffer_len];
let mut total_consumed_samples = 0;
let mut total_written_bytes = 0;
loop {
let input_len = input.read(&mut input_buffer)?;
if input_len == 0 {
break;
}
let mut input_buf = input_buffer.as_ptr() as *mut i16;
let mut input_buf_ident: c_int = sys::AACENC_BufferIdentifier_IN_AUDIO_DATA as c_int;
let mut input_buf_size: c_int = input_len as c_int;
let mut input_buf_el_size: c_int = mem::size_of::<i16>() as c_int;
let input_desc = sys::AACENC_BufDesc {
numBufs: 1,
bufs: &mut input_buf as *mut _ as *mut *mut c_void,
bufferIdentifiers: &mut input_buf_ident as *mut c_int,
bufSizes: &mut input_buf_size as *mut c_int,
bufElSizes: &mut input_buf_el_size as *mut c_int,
};
let mut output_buf = output_buffer.as_mut_ptr();
let mut output_buf_ident: c_int = sys::AACENC_BufferIdentifier_OUT_BITSTREAM_DATA as c_int;
let mut output_buf_size: c_int = output_buffer.len() as c_int;
let mut output_buf_el_size: c_int = mem::size_of::<i16>() as c_int;
let output_desc = sys::AACENC_BufDesc {
numBufs: 1,
bufs: &mut output_buf as *mut _ as *mut *mut c_void,
bufferIdentifiers: &mut output_buf_ident as *mut _,
bufSizes: &mut output_buf_size as *mut _,
bufElSizes: &mut output_buf_el_size as *mut _,
};
let in_args = sys::AACENC_InArgs {
numInSamples: input_len as i32 / 2,
numAncBytes: 0,
};
let mut out_args = unsafe { mem::zeroed() };
let code = unsafe {
sys::aacEncEncode(
self.handle.ptr,
&input_desc,
&output_desc,
&in_args,
&mut out_args,
)
};
if code != sys::AACENC_ERROR_AACENC_OK {
if code == sys::AACENC_ERROR_AACENC_ENCODE_EOF {
break;
}
return Err(EncoderError::FdkAac(code));
}
let input_consumed = out_args.numInSamples as usize;
let output_size = out_args.numOutBytes as usize;
output.write(&output_buffer[0..output_size])?;
total_consumed_samples += input_consumed;
total_written_bytes += output_size;
}
Ok(EncodeInfo {
output_size: total_written_bytes,
input_consumed: total_consumed_samples,
})
}
}
impl Debug for Encoder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Encoder {{ handle: {:?} }}", self.handle.ptr)
}
}
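// A hedged usage sketch (file paths are hypothetical; input is assumed to be
// 16-bit interleaved stereo PCM, matching the hard-coded channel setup above):
//
//     let encoder = Encoder::new(EncoderParams {
//         bit_rate: BitRate::VbrMedium,
//         sample_rate: 44100,
//         transport: Transport::Adts,
//     })?;
//     let mut pcm = std::fs::File::open("input.pcm")?;
//     let mut aac = std::fs::File::create("output.aac")?;
//     let info = encoder.encode(&mut pcm, &mut aac)?;
//     println!("consumed {} samples, wrote {} bytes", info.input_consumed, info.output_size);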
| 34.36965 | 138 | 0.592664 |
f985f018884259cf232629f1390a0bf7e48bd758 | 797 | //! Caching of [`NamespaceSchema`].
mod memory;
pub use memory::*;
mod sharded_cache;
pub use sharded_cache::*;
pub mod metrics;
use data_types2::{DatabaseName, NamespaceSchema};
use std::{fmt::Debug, sync::Arc};
/// An abstract cache of [`NamespaceSchema`].
pub trait NamespaceCache: Debug + Send + Sync {
/// Return the [`NamespaceSchema`] for `namespace`.
fn get_schema(&self, namespace: &DatabaseName<'_>) -> Option<Arc<NamespaceSchema>>;
/// Place `schema` in the cache, unconditionally overwriting any existing
/// [`NamespaceSchema`] mapped to `namespace`, returning
/// the previous value, if any
fn put_schema(
&self,
namespace: DatabaseName<'static>,
schema: impl Into<Arc<NamespaceSchema>>,
) -> Option<Arc<NamespaceSchema>>;
}
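// A hedged get-or-insert sketch over this trait (the cache handle and the
// schema loader are hypothetical):
//
//     let schema = match cache.get_schema(&name) {
//         Some(schema) => schema,
//         None => {
//             let schema = Arc::new(load_schema_from_catalog(&name)); // hypothetical loader
//             cache.put_schema(name.clone(), Arc::clone(&schema));
//             schema
//         }
//     };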
| 28.464286 | 87 | 0.676286 |
114af315f0ac65d18d9c2682ee2ae2a77290efaa | 2,574 | use crate::search::*;
use crate::util::*;
/// Returns documents that contain an indexed value for a field.
///
/// An indexed value may not exist for a document’s field for a variety of reasons:
///
/// - The field in the source JSON is `null` or `[]`
/// - The field has `"index" : false` set in the mapping
/// - The length of the field value exceeded an `ignore_above` setting in the mapping
/// - The field value was malformed and `ignore_malformed` was defined in the mapping
///
/// To create exists query:
/// ```
/// # use elasticsearch_dsl::queries::*;
/// # use elasticsearch_dsl::queries::params::*;
/// # let query =
/// Query::exists("test");
/// ```
/// <https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html>
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct ExistsQuery {
#[serde(rename = "exists")]
inner: Inner,
}
#[derive(Debug, Clone, PartialEq, Serialize)]
struct Inner {
field: String,
#[serde(skip_serializing_if = "ShouldSkip::should_skip")]
boost: Option<Boost>,
#[serde(skip_serializing_if = "ShouldSkip::should_skip")]
_name: Option<String>,
}
impl Query {
/// Creates an instance of [`ExistsQuery`]
///
/// - `field` - Name of the field you wish to search.
/// While a field is deemed non-existent if the JSON value is `null` or `[]`,
/// these values will indicate the field does exist:
/// - Empty strings, such as `""` or `"-"`
/// - Arrays containing `null` and another value, such as `[null, "foo"]`
/// - A custom [`null-value`](https://www.elastic.co/guide/en/elasticsearch/reference/current/null-value.html), defined in field mapping
pub fn exists(field: impl Into<String>) -> ExistsQuery {
ExistsQuery {
inner: Inner {
field: field.into(),
boost: None,
_name: None,
},
}
}
}
impl ExistsQuery {
add_boost_and_name!();
}
impl ShouldSkip for ExistsQuery {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serialization() {
assert_serialize(
Query::exists("test"),
json!({
"exists": {
"field": "test"
}
}),
);
assert_serialize(
Query::exists("test").boost(2).name("test"),
json!({
"exists": {
"field": "test",
"boost": 2,
"_name": "test"
}
}),
);
}
}
| 28.285714 | 142 | 0.559052 |
9bca0603f7fab07ea811a89166a19372ead9a7d2 | 10,261 | use futures::{ready, Future, FutureExt};
use std::{
clone::Clone,
error::Error,
fmt, mem,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
},
task::{Context, Poll},
};
use tokio::{
sync::{mpsc, oneshot},
task::JoinHandle,
};
use super::{
port_allocator::{PortAllocator, PortNumber},
receiver::Receiver,
sender::Sender,
};
/// An error occurred during connecting to a remote service.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ConnectError {
/// All local ports are in use.
LocalPortsExhausted,
/// All remote ports are in use.
RemotePortsExhausted,
/// Too many connection requests are pending.
TooManyPendingConnectionRequests,
/// Connection has been rejected by server.
Rejected,
/// A multiplexer error has occurred or it has been terminated.
ChMux,
}
impl fmt::Display for ConnectError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::LocalPortsExhausted => write!(f, "all local ports are in use"),
Self::RemotePortsExhausted => write!(f, "all remote ports are in use"),
Self::TooManyPendingConnectionRequests => write!(f, "too many connection requests are pending"),
Self::Rejected => write!(f, "connection has been rejected by server"),
Self::ChMux => write!(f, "multiplexer error"),
}
}
}
impl Error for ConnectError {}
impl From<ConnectError> for std::io::Error {
fn from(err: ConnectError) -> Self {
use std::io::ErrorKind;
match err {
ConnectError::LocalPortsExhausted => Self::new(ErrorKind::AddrInUse, err.to_string()),
ConnectError::RemotePortsExhausted => Self::new(ErrorKind::AddrInUse, err.to_string()),
ConnectError::TooManyPendingConnectionRequests => Self::new(ErrorKind::AddrInUse, err.to_string()),
ConnectError::Rejected => Self::new(ErrorKind::ConnectionRefused, err.to_string()),
ConnectError::ChMux => Self::new(ErrorKind::ConnectionReset, err.to_string()),
}
}
}
/// Accounts connection request credits.
#[derive(Clone)]
struct ConnectRequestCrediter(Arc<Mutex<ConnectRequestCrediterInner>>);
struct ConnectRequestCrediterInner {
limit: u16,
used: u16,
notify_tx: Vec<oneshot::Sender<()>>,
}
impl ConnectRequestCrediter {
/// Creates a new connection request crediter.
pub fn new(limit: u16) -> Self {
let inner = ConnectRequestCrediterInner { limit, used: 0, notify_tx: Vec::new() };
Self(Arc::new(Mutex::new(inner)))
}
/// Obtains a connection request credit.
///
/// Waits for the credit to become available.
pub async fn request(&self) -> ConnectRequestCredit {
loop {
let rx = {
let mut inner = self.0.lock().unwrap();
if inner.used < inner.limit {
inner.used += 1;
return ConnectRequestCredit(self.0.clone());
} else {
let (tx, rx) = oneshot::channel();
inner.notify_tx.push(tx);
rx
}
};
let _ = rx.await;
}
}
/// Tries to obtain a connection request credit.
///
/// Does not wait for the credit to become available.
pub fn try_request(&self) -> Option<ConnectRequestCredit> {
let mut inner = self.0.lock().unwrap();
if inner.used < inner.limit {
inner.used += 1;
Some(ConnectRequestCredit(self.0.clone()))
} else {
None
}
}
}
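// A hedged sketch of the accounting contract: dropping a credit frees the slot
// and wakes one queued waiter (caller code is hypothetical):
//
//     let crediter = ConnectRequestCrediter::new(1);
//     let credit = crediter.try_request().expect("first credit is available");
//     assert!(crediter.try_request().is_none()); // limit reached
//     drop(credit); // slot is returned
//     assert!(crediter.try_request().is_some());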
/// A credit for requesting a connection.
pub(crate) struct ConnectRequestCredit(Arc<Mutex<ConnectRequestCrediterInner>>);
impl Drop for ConnectRequestCredit {
fn drop(&mut self) {
let notify_tx = {
let mut inner = self.0.lock().unwrap();
inner.used -= 1;
mem::take(&mut inner.notify_tx)
};
for tx in notify_tx {
let _ = tx.send(());
}
}
}
/// Connection to remote service request to local multiplexer.
#[derive(Debug)]
pub(crate) struct ConnectRequest {
/// Local port.
pub local_port: PortNumber,
/// Notification that request has been queued for sending.
pub sent_tx: mpsc::Sender<()>,
/// Response channel sender.
pub response_tx: oneshot::Sender<ConnectResponse>,
/// Wait for port to become available.
pub wait: bool,
}
/// Connection to remote service response from local multiplexer.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub(crate) enum ConnectResponse {
/// Connection accepted and channel opened.
Accepted(Sender, Receiver),
/// Connection was rejected.
Rejected {
/// Remote endpoint had no ports available.
no_ports: bool,
},
}
/// An outstanding connection request.
///
/// Await it to obtain the result of the connection request.
pub struct Connect {
pub(crate) sent_rx: mpsc::Receiver<()>,
pub(crate) response: JoinHandle<Result<(Sender, Receiver), ConnectError>>,
}
impl Connect {
/// Returns once the connect request has been sent.
///
/// It is guaranteed that the connect request will be made available via
/// the [Listener](super::Listener) at the remote endpoint before messages
/// sent on any port after this function returns will arrive.
///
/// This will also return when the multiplexer has been terminated.
pub async fn sent(&mut self) {
let _ = self.sent_rx.recv().await;
}
}
impl Future for Connect {
type Output = Result<(Sender, Receiver), ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let result = ready!(Pin::into_inner(self).response.poll_unpin(cx));
Poll::Ready(result.map_err(|_| ConnectError::ChMux)?)
}
}
/// Multiplexer client.
///
/// Use to request a new port for sending and receiving.
/// This can be cloned to make simultaneous requests.
#[derive(Clone)]
pub struct Client {
tx: mpsc::UnboundedSender<ConnectRequest>,
crediter: ConnectRequestCrediter,
port_allocator: PortAllocator,
listener_dropped: Arc<AtomicBool>,
terminate_tx: mpsc::UnboundedSender<()>,
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Client").field("port_allocator", &self.port_allocator).finish()
}
}
impl Client {
pub(crate) fn new(
tx: mpsc::UnboundedSender<ConnectRequest>, limit: u16, port_allocator: PortAllocator,
listener_dropped: Arc<AtomicBool>, terminate_tx: mpsc::UnboundedSender<()>,
) -> Client {
Client {
tx,
crediter: ConnectRequestCrediter::new(limit),
port_allocator,
listener_dropped,
terminate_tx,
}
}
/// Obtains the port allocator.
pub fn port_allocator(&self) -> PortAllocator {
self.port_allocator.clone()
}
/// Connects to a newly allocated remote port from a newly allocated local port.
///
/// This function waits until a local and remote port become available.
pub async fn connect(&self) -> Result<(Sender, Receiver), ConnectError> {
self.connect_ext(None, true).await?.await
}
/// Start opening a new port to the remote endpoint with extended options.
///
/// If `local_port` is [None] a new local port number is allocated.
/// Otherwise the specified port is used.
///
/// If `wait` is true, this function waits until a local and remote port become available.
/// Otherwise it returns the appropriate [ConnectError] if no ports are available.
/// If `wait` is false, it still waits until the listener on the remote endpoint accepts
/// or rejects the connection.
///
/// This returns a [Connect] that must be awaited to obtain the result.
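///
/// A hedged usage sketch (`client` is assumed to be an established [`Client`]):
///
/// ```ignore
/// let mut connect = client.connect_ext(None, true).await?;
/// connect.sent().await; // the request is now queued for sending
/// let (sender, receiver) = connect.await?;
/// ```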
pub async fn connect_ext(&self, local_port: Option<PortNumber>, wait: bool) -> Result<Connect, ConnectError> {
// Obtain local port.
let local_port = match local_port {
Some(local_port) => local_port,
None => {
if wait {
self.port_allocator.allocate().await
} else {
self.port_allocator.try_allocate().ok_or(ConnectError::LocalPortsExhausted)?
}
}
};
// Obtain credit for connection request.
let credit = if wait {
self.crediter.request().await
} else {
match self.crediter.try_request() {
Some(credit) => credit,
None => return Err(ConnectError::TooManyPendingConnectionRequests),
}
};
// Build and send request.
let (sent_tx, sent_rx) = mpsc::channel(1);
let (response_tx, response_rx) = oneshot::channel();
let req = ConnectRequest { local_port, sent_tx, response_tx, wait };
let _ = self.tx.send(req);
let listener_dropped = self.listener_dropped.clone();
let response = tokio::spawn(async move {
// Credit must be kept until response is received.
let _credit = credit;
// Process response.
match response_rx.await {
Ok(ConnectResponse::Accepted(sender, receiver)) => Ok((sender, receiver)),
Ok(ConnectResponse::Rejected { no_ports }) => {
if no_ports {
Err(ConnectError::RemotePortsExhausted)
} else {
Err(ConnectError::Rejected)
}
}
Err(_) => {
if listener_dropped.load(Ordering::SeqCst) {
Err(ConnectError::Rejected)
} else {
Err(ConnectError::ChMux)
}
}
}
});
Ok(Connect { sent_rx, response })
}
/// Terminates the multiplexer, forcibly closing all open ports.
pub fn terminate(&self) {
let _ = self.terminate_tx.send(());
}
}
| 32.887821 | 114 | 0.600819 |
337b37ed4609b1e75bbee69f58d4e24cd31e764a | 3,015 | use super::{
coordinates::Coordinates,
factory::{
create_canvas, create_resize_warning_paragraph, WINDOW_MIN_HEIGHT, WINDOW_MIN_WIDTH,
},
renderer::Renderer,
viewport::Viewport,
};
use crate::{
game::{game_item::GameItem, world::World},
view::{
factory::{
create_actors_block, create_actors_viewport, create_background_block, create_ui_block,
create_ui_viewport, split_into_actors_and_ui,
},
session::Session,
},
};
use anyhow::Result;
use tui::{
backend::Backend,
layout::Rect,
widgets::{canvas::Context, Block},
Frame,
};
const MIN_CANVAS_LENGTH: u16 = 3;
pub trait Renderable {
fn render(&self, renderer: &mut Renderer);
fn viewport(&self) -> Viewport;
}
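// A hedged sketch of a `Renderable` implementor (the item type is hypothetical;
// returning the field by value assumes `Viewport: Copy`, which its reuse in
// `render` below suggests):
//
//     struct Marker { viewport: Viewport }
//
//     impl Renderable for Marker {
//         fn render(&self, renderer: &mut Renderer) { /* draw via renderer */ }
//         fn viewport(&self) -> Viewport { self.viewport }
//     }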
pub fn render(session: &mut Session, world: &mut World) -> Result<()> {
session.terminal.draw(|frame| {
let window = frame.size();
let (actors_rect, ui_rect) = split_into_actors_and_ui(window);
if should_resize_window(window) {
frame.render_widget(create_resize_warning_paragraph(), window);
return;
}
let actors_viewport = create_actors_viewport(actors_rect);
let ui_viewport = create_ui_viewport(ui_rect);
world
.broadcast_actors_viewport(actors_viewport)
.expect("Broadcast Actors Viewport succeeds");
world
.broadcast_ui_viewport(ui_viewport)
.expect("Broadcast UI Viewport succeeds");
render_background(frame);
render_canvas(
frame,
&world.actors,
create_actors_block(),
world.offset,
actors_rect,
actors_viewport,
);
render_canvas(
frame,
&world.ui,
create_ui_block(),
Coordinates::default(),
ui_rect,
ui_viewport,
);
})?;
Ok(())
}
fn render_background<B: Backend>(frame: &mut Frame<B>) {
// Set the background color of the *entire* terminal window, even outside of the canvases.
frame.render_widget(create_background_block(), frame.size());
}
fn render_canvas<B: Backend>(
frame: &mut Frame<B>,
renderables: &[Box<dyn GameItem>],
block: Block,
offset: Coordinates,
rect: Rect,
viewport: Viewport,
) {
if is_canvas_too_small_to_render(rect) {
return;
}
let mut canvas = create_canvas(block, viewport);
canvas = canvas.paint(|ctx: &mut Context| {
let mut renderer = Renderer::new(ctx, offset, viewport);
for renderable in renderables.iter() {
renderable.render(&mut renderer);
}
});
frame.render_widget(canvas, rect);
}
fn is_canvas_too_small_to_render(canvas: Rect) -> bool {
canvas.width < MIN_CANVAS_LENGTH || canvas.height < MIN_CANVAS_LENGTH
}
fn should_resize_window(window: Rect) -> bool {
window.height < u16::from(WINDOW_MIN_HEIGHT) || window.width < u16::from(WINDOW_MIN_WIDTH)
}
| 27.916667 | 98 | 0.621891 |
1eda9eccc19697ed6b64957aa237db6bc5252db1 | 5,721 | //! This build script is responsible for ensuring that if parser targets wasm,
//! the JS Parser package is available at the expected location for
//! `wasm_bindgen` tool.
// === Features ===
#![feature(option_result_contains)]
use std::io::prelude::*;
use enso_build_utilities::absolute_path;
use enso_build_utilities::targeting_wasm;
use enso_build_utilities::PathRef;
use std::fs;
use std::fs::create_dir_all;
use std::fs::File;
use std::path::PathBuf;
// =========================
// == Hardcoded constants ==
// =========================
/// Where the crate expects to find file with compiled parser.
/// Path relative to the crate directory.
const PARSER_PATH: &str = "./pkg/scala-parser.js";
/// Commit from `enso` repository that will be used to obtain parser from.
const PARSER_COMMIT: &str = "649fe33ccf148d47deb6ba6a06f3babc48078e3e";
/// Magic code that needs to be prepended to ScalaJS generated parser due to:
/// https://github.com/scala-js/scala-js/issues/3677/
const PARSER_PREAMBLE: &str = "var __ScalaJSEnv = { global: window };";
/// Obtains a URL where this parser version can be downloaded.
pub fn parser_url(version: &ParserVersion) -> reqwest::Url {
let url_string = format!(
"https://packages.luna-lang.org/parser-js/nightly/{}/scala-parser.js",
version.commit
);
let invalid_url_msg = format!("{} is an invalid URL.", url_string);
reqwest::Url::parse(&url_string).expect(&invalid_url_msg)
}
// ===================
// == ParserVersion ==
// ===================
/// Parser version described as commit hash from `enso` repository.
#[derive(Clone, Debug, PartialEq)]
pub struct ParserVersion {
pub commit: String,
}
impl ParserVersion {
/// Create a version described by given commit hash.
pub fn from_commit(commit: String) -> ParserVersion {
ParserVersion { commit }
}
/// The JS parser version required for this crate.
pub fn required() -> ParserVersion {
ParserVersion { commit: PARSER_COMMIT.into() }
}
}
// ========================
// == Downloading parser ==
// ========================
/// Stores information about which parser version should be provided where.
///
/// Implementation provides methods that download desired parser version, patch it and store to the
/// file, so parser can be consumed by `wasm_bindgen`.
struct ParserProvider {
/// Required parser version.
version: ParserVersion,
/// The path where JS file needs to be provided.
parser_path: PathBuf,
}
impl ParserProvider {
/// Creates a provider that obtains given parser version to a given path.
pub fn new(version: ParserVersion, parser_path: impl PathRef) -> ParserProvider {
let parser_path = PathBuf::from(parser_path.as_ref());
ParserProvider { version, parser_path }
}
/// Downloads contents of JS parser into memory.
pub async fn download(&self) -> bytes::Bytes {
let url = parser_url(&self.version);
let get_error = format!("Failed to get response from {}.", url);
let download_error = format!("Failed to download contents of {}.", url);
let server_error = format!("Server replied with error when getting {}.", url);
let response = reqwest::get(url).await.expect(&get_error);
let response = response.error_for_status().expect(&server_error);
response.bytes().await.expect(&download_error)
}
/// Stores JS parser into file, after patching with a `PARSER_PREAMBLE`.
pub fn patch_and_store(&self, js_parser: bytes::Bytes) {
let display_path = self.parser_path.display();
let open_error = format!("Failed to open {}.", display_path);
let write_error = format!("Failed to write {}.", display_path);
let flush_error = format!("Failed to flush {}.", display_path);
let mut file = File::create(&self.parser_path).expect(&open_error);
file.write_all(PARSER_PREAMBLE.as_bytes()).expect(&write_error);
file.write_all(&js_parser).expect(&write_error);
file.flush().expect(&flush_error);
}
/// Ensures that target's parent directory exists.
pub fn prepare_target_location(&self) {
let parent_directory =
self.parser_path.parent().expect("Unable to access parent directory.");
let create_dir_error =
format!("Failed to create directory: {}.", parent_directory.display());
create_dir_all(parent_directory).expect(&create_dir_error);
}
/// Places required parser version in the target location.
pub async fn run(&self) {
self.prepare_target_location();
let parent_directory =
self.parser_path.parent().expect("Unable to access parent directory.");
let fingerprint = parent_directory.join("parser.fingerprint");
let opt_version = fs::read_to_string(&fingerprint);
let changed = match opt_version {
Err(_) => true,
Ok(hash) => hash != PARSER_COMMIT,
};
if changed {
let parser_js = self.download().await;
self.patch_and_store(parser_js);
fs::write(&fingerprint, PARSER_COMMIT).expect("Unable to write parser fingerprint.");
}
}
}
// ==========
// == main ==
// ==========
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
if targeting_wasm() {
let required_version = ParserVersion::required();
let parser_path = absolute_path(PARSER_PATH)?;
let provider = ParserProvider::new(required_version, &parser_path);
provider.run().await;
}
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed={}", PARSER_PATH);
Ok(())
}
| 34.884146 | 99 | 0.648313 |
91acdf360385ce6cbe6351341ab08a2bca39e586 | 1,731 | use crate::bls::api::{cess_BLSDigest, cess_BLSPrivateKey, cess_BLSPublicKey, cess_BLSSignature};
/// HashResponse
#[repr(C)]
pub struct cess_HashResponse {
pub digest: cess_BLSDigest,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_hash_response(ptr: *mut cess_HashResponse) {
let _ = Box::from_raw(ptr);
}
/// AggregateResponse
#[repr(C)]
pub struct cess_AggregateResponse {
pub signature: cess_BLSSignature,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_aggregate_response(ptr: *mut cess_AggregateResponse) {
let _ = Box::from_raw(ptr);
}
/// PrivateKeyGenerateResponse
#[repr(C)]
pub struct cess_PrivateKeyGenerateResponse {
pub private_key: cess_BLSPrivateKey,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_private_key_generate_response(
ptr: *mut cess_PrivateKeyGenerateResponse,
) {
let _ = Box::from_raw(ptr);
}
/// PrivateKeySignResponse
#[repr(C)]
pub struct cess_PrivateKeySignResponse {
pub signature: cess_BLSSignature,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_private_key_sign_response(
ptr: *mut cess_PrivateKeySignResponse,
) {
let _ = Box::from_raw(ptr);
}
/// PrivateKeyPublicKeyResponse
#[repr(C)]
pub struct cess_PrivateKeyPublicKeyResponse {
pub public_key: cess_BLSPublicKey,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_private_key_public_key_response(
ptr: *mut cess_PrivateKeyPublicKeyResponse,
) {
let _ = Box::from_raw(ptr);
}
/// ZeroSignatureResponse
#[repr(C)]
pub struct cess_ZeroSignatureResponse {
pub signature: cess_BLSSignature,
}
#[no_mangle]
pub unsafe extern "C" fn cess_destroy_zero_signature_response(
ptr: *mut cess_ZeroSignatureResponse,
) {
let _ = Box::from_raw(ptr);
}
| 21.109756 | 96 | 0.745234 |
d6bef080180dc6a3d5d9e887ec2b8bc02d5ac8c3 | 9,669 | use lazy_static::lazy_static;
use rusb::{Context, DeviceHandle, Error, UsbContext};
use std::time::Duration;
use crate::probe::icdi::IcdiError;
use std::collections::HashMap;
use super::tools::{is_icdi_device, read_serial_number};
use crate::{
probe::{DebugProbeError, ProbeCreationError},
DebugProbeSelector,
};
/// The USB Command packet size.
const CMD_LEN: usize = 16;
/// The USB VendorID.
pub const USB_VID: u16 = 0x1cbe; // Luminary Micro Inc.
pub const TIMEOUT: Duration = Duration::from_millis(1000);
lazy_static! {
/// Map of USB PID to firmware version name and device endpoints.
pub static ref USB_PID_EP_MAP: HashMap<u16, ICDIInfo> = {
let mut m = HashMap::new();
m.insert(0x00fd, ICDIInfo::new("V2", 0x00fd, 0x02, 0x81, 0x83));
// m.insert(0x374b, STLinkInfo::new("V2-1", 0x374b, 0x01, 0x81, 0x82));
// m.insert(0x374a, STLinkInfo::new("V2-1", 0x374a, 0x01, 0x81, 0x82)); // Audio
// m.insert(0x3742, STLinkInfo::new("V2-1", 0x3742, 0x01, 0x81, 0x82)); // No MSD
// m.insert(0x3752, STLinkInfo::new("V2-1", 0x3752, 0x01, 0x81, 0x82)); // Unproven
// m.insert(0x374e, STLinkInfo::new("V3", 0x374e, 0x01, 0x81, 0x82));
// m.insert(0x374f, STLinkInfo::new("V3", 0x374f, 0x01, 0x81, 0x82)); // Bridge
// m.insert(0x3753, STLinkInfo::new("V3", 0x3753, 0x01, 0x81, 0x82)); // 2VCP
m
};
}
/// A helper struct describing ICDI device info.
#[derive(Clone, Debug, Default)]
pub struct ICDIInfo {
pub version_name: String,
pub usb_pid: u16,
ep_out: u8,
ep_in: u8,
ep_swo: u8,
}
impl ICDIInfo {
pub fn new<V: Into<String>>(
version_name: V,
usb_pid: u16,
ep_out: u8,
ep_in: u8,
ep_swo: u8,
) -> Self {
Self {
version_name: version_name.into(),
usb_pid,
ep_out,
ep_in,
ep_swo,
}
}
}
pub(crate) struct ICDIUSBDevice {
device_handle: DeviceHandle<rusb::Context>,
info: ICDIInfo,
}
impl std::fmt::Debug for ICDIUSBDevice {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("ICDIUSBDevice")
.field("device_handle", &"DeviceHandle<rusb::Context>")
.field("info", &self.info)
.finish()
}
}
pub trait IcdiUsb: std::fmt::Debug {
fn write(
&mut self,
cmd: &[u8],
write_data: &[u8],
read_data: &mut [u8],
timeout: Duration,
) -> Result<(), DebugProbeError>;
/// Reset the USB device. This can be used to recover when the
/// ICDI does not respond to USB requests.
fn reset(&mut self) -> Result<(), DebugProbeError>;
fn read_swo(
&mut self,
read_data: &mut [u8],
timeout: Duration,
) -> Result<usize, DebugProbeError>;
}
impl ICDIUSBDevice {
/// Creates and initializes a new USB device.
pub fn new_from_selector(
selector: impl Into<DebugProbeSelector>,
) -> Result<Self, ProbeCreationError> {
let selector = selector.into();
let context = Context::new()?;
log::debug!("Acquired libusb context.");
let device = context
.devices()?
.iter()
.filter(is_icdi_device)
.find_map(|device| {
let descriptor = device.device_descriptor().ok()?;
// First match the VID & PID.
if selector.vendor_id == descriptor.vendor_id()
&& selector.product_id == descriptor.product_id()
{
// If the VID & PID match, match the serial if one was given.
if let Some(serial) = &selector.serial_number {
let sn_str = read_serial_number(&device, &descriptor).ok();
if sn_str.as_ref() == Some(serial) {
Some(device)
} else {
None
}
} else {
// If no serial was given, the VID & PID match is enough; return the device.
Some(device)
}
} else {
None
}
})
.ok_or(ProbeCreationError::NotFound)?;
let mut device_handle = device.open()?;
log::debug!("Aquired handle for probe");
let config = device.active_config_descriptor()?;
log::debug!("Active config descriptor: {:?}", &config);
let descriptor = device.device_descriptor()?;
log::debug!("Device descriptor: {:?}", &descriptor);
let info = USB_PID_EP_MAP[&descriptor.product_id()].clone();
device_handle.claim_interface(0)?;
log::debug!("Claimed interface 0 of USB device.");
let mut endpoint_out = false;
let mut endpoint_in = false;
let mut endpoint_swo = false;
if let Some(interface) = config.interfaces().next() {
if let Some(descriptor) = interface.descriptors().next() {
for endpoint in descriptor.endpoint_descriptors() {
if endpoint.address() == info.ep_out {
endpoint_out = true;
} else if endpoint.address() == info.ep_in {
endpoint_in = true;
} else if endpoint.address() == info.ep_swo {
endpoint_swo = true;
}
}
}
}
if !endpoint_out {
return Err(IcdiError::EndpointNotFound.into());
}
if !endpoint_in {
return Err(IcdiError::EndpointNotFound.into());
}
if !endpoint_swo {
return Err(IcdiError::EndpointNotFound.into());
}
let usb_icdi = Self {
device_handle,
info,
};
log::debug!("Succesfully attached to ICDI.");
Ok(usb_icdi)
}
/// Closes the USB interface gracefully.
/// Internal helper.
fn close(&mut self) -> Result<(), Error> {
self.device_handle.release_interface(0)
}
}
impl IcdiUsb for ICDIUSBDevice {
/// Writes to the out EP and reads back data if needed.
/// First the `cmd` is sent.
/// In a second step `write_data` is transmitted.
/// And lastly, data will be read back until `read_data` is filled.
fn write(
&mut self,
cmd: &[u8],
write_data: &[u8],
read_data: &mut [u8],
timeout: Duration,
) -> Result<(), DebugProbeError> {
log::trace!(
"Sending command {:x?} to ICDI, timeout: {:?}",
cmd,
timeout
);
// Command phase.
assert!(cmd.len() <= CMD_LEN);
let mut padded_cmd = [0u8; CMD_LEN];
padded_cmd[..cmd.len()].copy_from_slice(cmd);
let ep_out = self.info.ep_out;
let ep_in = self.info.ep_in;
let written_bytes = self
.device_handle
.write_bulk(ep_out, &padded_cmd, timeout)
.map_err(|e| DebugProbeError::USB(Some(Box::new(e))))?;
if written_bytes != CMD_LEN {
return Err(IcdiError::NotEnoughBytesRead {
is: written_bytes,
should: CMD_LEN,
}
.into());
}
// Optional data out phase.
if !write_data.is_empty() {
let written_bytes = self
.device_handle
.write_bulk(ep_out, write_data, timeout)
.map_err(|e| DebugProbeError::USB(Some(Box::new(e))))?;
if written_bytes != write_data.len() {
return Err(IcdiError::NotEnoughBytesRead {
is: written_bytes,
should: write_data.len(),
}
.into());
}
}
// Optional data in phase.
if !read_data.is_empty() {
let read_bytes = self
.device_handle
.read_bulk(ep_in, read_data, timeout)
.map_err(|e| DebugProbeError::USB(Some(Box::new(e))))?;
if read_bytes != read_data.len() {
return Err(IcdiError::NotEnoughBytesRead {
is: read_bytes,
should: read_data.len(),
}
.into());
}
}
Ok(())
}
fn read_swo(
&mut self,
read_data: &mut [u8],
timeout: Duration,
) -> Result<usize, DebugProbeError> {
log::trace!(
"Reading {:?} SWO bytes to ICDI, timeout: {:?}",
read_data.len(),
timeout
);
let ep_swo = self.info.ep_swo;
if read_data.is_empty() {
Ok(0)
} else {
let read_bytes = self
.device_handle
.read_bulk(ep_swo, read_data, timeout)
.map_err(|e| DebugProbeError::USB(Some(Box::new(e))))?;
Ok(read_bytes)
}
}
/// Reset the USB device. This can be used to recover when the
/// ICDI does not respond to USB requests.
fn reset(&mut self) -> Result<(), DebugProbeError> {
log::debug!("Resetting USB device of ICDI");
self.device_handle
.reset()
.map_err(|e| DebugProbeError::USB(Some(Box::new(e))))
}
}
impl Drop for ICDIUSBDevice {
fn drop(&mut self) {
// We ignore the error case as we can't do much about it anyways.
let _ = self.close();
}
}
| 30.792994 | 100 | 0.523322 |
f977ff32bb7cc77a6c4e2b7d67c5d922ee18bd5f | 1,100 | #[macro_use]
extern crate criterion;
extern crate delaunator;
extern crate rand;
use criterion::{AxisScale, Criterion, ParameterizedBenchmark, PlotConfiguration};
use delaunator::{triangulate, Point};
use rand::{Rng, SeedableRng, XorShiftRng};
use std::iter::repeat_with;
const COUNTS: &[usize] = &[100, 1000, 10_000, 100_000];
fn bench(c: &mut Criterion) {
let mut rng = XorShiftRng::from_seed([0; 16]);
let all_points: Vec<_> = repeat_with(|| rng.gen())
.map(|(x, y)| Point { x, y })
.take(*COUNTS.last().unwrap())
.collect();
let bench = ParameterizedBenchmark::new(
"triangulate",
move |b, &&count| {
let points = &all_points[..count];
b.iter(move || triangulate(points))
},
COUNTS,
);
c.bench(
"triangulate",
bench
.sample_size(20) // override to a small sample size, otherwise it takes too long
.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)),
);
}
criterion_group!(benches, bench);
criterion_main!(benches);
| 26.829268 | 93 | 0.624545 |
76e823757d2658e0f39b6c8d0eb7138782123206 | 1,929 | //! Fabian "ryg" Giesen's real-time DXT compressor
use stb_sys as sys;
/// DXT compression mode
pub enum CompressionMode {
/// Default compression mode
Normal = 0,
/// Use dithering. dubious win. never use for normal maps and the like!
Dither = 1,
/// High quality mode, does two refinement steps instead of 1. ~30-40% slower.
Highqual = 2,
}
/// Call `stb_compress_dxt_block()` for every block (you must pad) source should be a 4x4 block of
/// RGBA data in row-major order; Alpha channel is not stored if you specify `alpha=0` (but you
/// must supply some constant alpha in the alpha channel).
/// You can turn on dithering and "high quality" using mode.
pub fn stb_compress_dxt_block(
dest: &mut [u8],
src_rgba_four_bytes_per_pixel: &[u8],
alpha: i32,
mode: CompressionMode,
) {
debug_assert_ne!(src_rgba_four_bytes_per_pixel.len(), 0);
debug_assert_eq!(src_rgba_four_bytes_per_pixel.len() % 16, 0);
debug_assert_ne!(dest.len(), 0);
debug_assert_eq!(dest.len() % 8, 0);
unsafe {
sys::stb_compress_dxt_block(
dest.as_mut_ptr(),
src_rgba_four_bytes_per_pixel.as_ptr(),
alpha,
mode as i32,
)
}
}
pub fn stb_compress_bc4_block(dest: &mut [u8], src_r_one_byte_per_pixel: &[u8]) {
unsafe { sys::stb_compress_bc4_block(dest.as_mut_ptr(), src_r_one_byte_per_pixel.as_ptr()) }
}
pub fn stb_compress_bc5_block(dest: &mut [u8], src_rg_two_byte_per_pixel: &[u8]) {
unsafe { sys::stb_compress_bc5_block(dest.as_mut_ptr(), src_rg_two_byte_per_pixel.as_ptr()) }
}
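// A hedged usage sketch for BC5 (two channels, e.g. a normal map's XY): one
// 4x4 block of RG input (2 bytes per pixel, 32 bytes) compresses to 16 bytes:
//
//     let src_rg = [0u8; 32];
//     let mut dst = [0u8; 16];
//     stb_compress_bc5_block(&mut dst, &src_rg);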
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compress_dxt_block() {
let src: [u8; 16] = [
255, 255, 0, 128, 255, 0, 128, 255, 255, 128, 0, 128, 128, 0, 128, 255,
];
let mut dst: [u8; 8] = [0; 8];
stb_compress_dxt_block(&mut dst, &src, 0, CompressionMode::Normal);
}
}
| 32.15 | 98 | 0.649041 |
483387ebe505391ec581adf2ce24b3c15be17c4b | 28,497 | use crate::{
collector,
content_length::ContentLength,
message::Message,
metrics::{FUTURE_STARTED, REQUEST_STARTING},
plan::{EngineCall, Plan},
stats::Fact,
RequestType,
};
use backoff::backoff::Backoff;
use ic_canister_client::{
get_backoff_policy, update_path, Agent, HttpClientConfig, Sender as AgentSender,
};
use ic_types::{
messages::{Blob, MessageId, SignedRequestBytes},
time::current_time_and_expiry_time,
CanisterId,
};
use byte_unit::Byte;
use leaky_bucket::RateLimiter;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
convert::TryFrom,
env, fs,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant, SystemTime},
};
use tokio::{
sync::{
mpsc::{channel, Receiver, Sender},
OwnedSemaphorePermit, Semaphore,
},
time::sleep,
};
use url::Url;
use crate::metrics::{
LATENCY_HISTOGRAM, QUERY_REPLY, UPDATE_SENT, UPDATE_SENT_REPLY, UPDATE_WAIT_REPLY,
};
#[derive(Serialize, Deserialize, Debug)]
struct WaitRequest {
request_id: MessageId,
request_type: String,
}
// Time to wait until the first request is issued
const START_OFFSET: Duration = Duration::from_millis(500);
// The initial number of permits = (--rps) * INITIAL_PERMITS_MULTIPLIER.
// This allows an initial burst @rps for INITIAL_PERMITS_MULTIPLIER secs,
// so that the ingress pool is sufficiently built up. After that, the
// permits are scaled down based on the response from the replicas.
const INITIAL_PERMITS_MULTIPLIER: usize = 10;
#[derive(PartialEq, Eq, Hash)]
enum CallFailure {
None,
OnSubmit,
OnWait,
}
pub struct CallResult {
fact: Fact,
counter: Option<u32>,
call_failure: CallFailure,
err_msg: Option<String>,
}
/// The engine for making requests. It issues requests against the IC and
/// produces facts for the stats collector to process.
#[derive(Clone)]
pub struct Engine {
agents: Vec<Agent>, // List of agents to be used in round-robin fashion when sending requests.
sender: AgentSender,
}
impl Engine {
/// Creates a new engine
pub fn new(
agent_sender: AgentSender,
sender_field: Blob,
urls: &[String],
http_client_config: HttpClientConfig,
) -> Engine {
let mut agents = Vec::with_capacity(urls.len());
let current_batch = urls.iter().map(|url| {
let mut agent = Agent::new_with_http_client_config(
Url::parse(url.as_str()).unwrap(),
agent_sender.clone(),
http_client_config,
);
agent.sender_field = sender_field.clone();
agent
});
agents.extend(current_batch);
Engine {
agents,
sender: agent_sender,
}
}
// Goes over all agents and makes sure they are connected and the corresponding
// replicas are healthy.
pub async fn wait_for_all_agents_to_be_healthy(&self) {
println!("Waiting for all replicas to be healthy!");
for agent in &self.agents {
while !agent.is_replica_healthy().await {
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
println!("All replicas are healthy!");
}
/// Execute requests in rps mode with the given number of requests per
/// second
///
    /// Currently, we use a single runtime; leaving the number of worker
    /// threads unspecified yields better throughput.
/// - `rps` - Request rate to issue against the IC
/// - `time_secs` - The time in seconds that the workload should be kept up
/// - `nonce` - Nonce to use for update calls
#[allow(clippy::too_many_arguments)]
pub async fn execute_rps(
&self,
rps: usize,
request_type: RequestType,
canister_method_name: String,
time_secs: usize,
nonce: String,
call_payload_size: Byte,
call_payload: Vec<u8>,
canister_id: &CanisterId,
periodic_output: bool,
) -> Vec<Fact> {
let requests: usize = time_secs * rps;
debug!("⏱️ Executing {} requests at {} rps", requests, rps);
let plan = Plan::new(
requests,
nonce,
call_payload_size,
call_payload,
*canister_id,
request_type,
canister_method_name,
);
let (collector, rec_handle) = collector::start::<Fact>(plan.clone(), periodic_output);
let (tx, rx) = channel(requests);
let time_origin = Instant::now();
let rx_handle = tokio::task::spawn(Engine::evaluate_requests(
rx,
collector,
Some(rps),
time_origin,
));
// Distribute the RPS within a second. If we just do
// .refill_interval(1).refill_amount(rps) we risk of having spikes of requests
// at every second.
let rate_limiter = RateLimiter::builder()
.initial(rps)
.interval(Duration::from_secs(1))
.refill(rps)
.build();
// Generate requests as allowed by the rate limiter
sleep(START_OFFSET).await;
let mut tx_handles = vec![];
for n in 0..requests {
rate_limiter.acquire_one().await;
let tx = tx.clone();
let plan = plan.clone();
let agent = self.agents[n % self.agents.len()].clone();
FUTURE_STARTED.inc();
tx_handles.push(tokio::task::spawn(async move {
REQUEST_STARTING.inc();
Engine::execute_request(agent, tx, time_origin, &plan, n).await;
}));
}
for tx_handle in tx_handles {
tx_handle.await.unwrap_or_else(|_| {
panic!("Await the tx failed.");
});
}
std::mem::drop(tx);
rx_handle.await.unwrap_or_else(|_| {
panic!("Await the rx failed.");
});
rec_handle.join().unwrap()
}
/// Finds the max rps currently possible with the given set of replicas.
/// - `rps` - Initial estimate of the max rps
/// - `time_secs` - The time in seconds that the workload should be kept up
/// - `nonce` - Nonce to use for update calls
#[allow(clippy::too_many_arguments)]
pub async fn evaluate_max_rps(
&self,
rps: usize,
request_type: RequestType,
canister_method_name: String,
time_secs: usize,
nonce: String,
call_payload_size: Byte,
call_payload: Vec<u8>,
canister_id: &CanisterId,
periodic_output: bool,
) -> Vec<Fact> {
println!(
"⏱️ Evaluating rps: initial_rps = {}, duration = {} sec",
rps, time_secs
);
let requests: usize = time_secs * rps; // This is just an upper estimate
let plan = Plan::new(
requests,
nonce,
call_payload_size,
call_payload,
*canister_id,
request_type,
canister_method_name,
);
let (collector, rec_handle) = collector::start::<Fact>(plan.clone(), periodic_output);
let (tx, rx) = channel(requests);
let rate_limiter = RateLimiter::builder()
.initial(rps)
.interval(Duration::from_secs(1))
.refill(rps)
.build();
// Initially holds (rps * INITIAL_PERMITS_MULTIPLIER) permits
let request_manager = RequestManager::new(rps * INITIAL_PERMITS_MULTIPLIER);
let time_origin = Instant::now();
let rx_handle = tokio::task::spawn(Engine::evaluate_requests(
rx,
collector,
Some(rps),
time_origin,
));
let end_time = time_origin
.checked_add(Duration::from_secs(time_secs as u64))
.unwrap();
let end_time = tokio::time::Instant::from_std(end_time);
let mut tx_handles = vec![];
let mut n = 0;
sleep(START_OFFSET).await;
let mut last_print = SystemTime::now();
while tokio::time::Instant::now() < end_time {
// Wait for the rate limiter to allow the next request
rate_limiter.acquire_one().await;
// Wait for a free slot based on the current rps estimate
let permit =
match tokio::time::timeout_at(end_time, request_manager.alloc_permit()).await {
Ok(permit) => permit,
Err(_) => break,
};
let tx = tx.clone();
let plan = plan.clone();
let request_manager_cl = request_manager.clone();
let agent = self.agents[n % self.agents.len()].clone();
FUTURE_STARTED.inc();
tx_handles.push(tokio::task::spawn(async move {
REQUEST_STARTING.inc();
let ret = Engine::execute_request(agent, tx, time_origin, &plan, n).await;
request_manager_cl.free_permit(permit, ret);
}));
n += 1;
if last_print.elapsed().unwrap() > Duration::from_secs(10) {
request_manager.show();
last_print = SystemTime::now();
}
}
println!("⏱️ Evaluating rps done: {:?}", time_origin.elapsed());
for tx_handle in tx_handles {
tx_handle.await.unwrap_or_else(|_| {
panic!("Await the tx failed.");
});
}
std::mem::drop(tx);
rx_handle.await.unwrap_or_else(|_| {
panic!("Await the rx failed.");
});
rec_handle.join().unwrap()
}
#[allow(clippy::too_many_arguments)]
async fn execute_request(
agent: Agent,
tx: Sender<CallResult>,
time_origin: Instant,
plan: &Plan,
n: usize,
) -> bool {
match plan.generate_call(n) {
EngineCall::Read { method, arg } => {
Engine::execute_query(&agent, tx, time_origin, plan, method, arg, n)
.await
.is_some()
}
EngineCall::Write { method, arg } => {
Engine::execute_update(&agent, tx, time_origin, plan, method, arg, n).await
}
}
}
#[allow(clippy::too_many_arguments)]
async fn execute_query(
agent: &Agent,
tx: Sender<CallResult>,
_time_origin: Instant,
plan: &Plan,
method: String,
arg: Vec<u8>,
n: usize,
) -> Option<u32> {
let time_query_start = Instant::now();
let response = agent.execute_query(&plan.canister_id, &*method, arg).await;
debug!("Sent query ({}). Response was: {:?}", n, response);
match response {
Ok(r) => {
QUERY_REPLY.with_label_values(&["replied"]).inc();
if let Ok(f) = env::var("RESULT_FILE") {
let bytes: Vec<u8> = r.clone().unwrap_or_default();
eprintln!("Writing results file: {}", &f);
fs::write(f, bytes).unwrap();
}
Engine::check_query(r, tx, time_query_start, plan).await
}
Err(e) => {
                let err = format!("{:?}", e);
QUERY_REPLY.with_label_values(&[&err]).inc();
                // This is broken: the Error type returned by execute_query is
                // not nicely structured; it's just a string.
let http_status = 0_u16;
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(0),
http_status,
Instant::now().duration_since(time_query_start),
false,
plan.request_type,
),
counter: None,
call_failure: CallFailure::OnWait,
err_msg: Some(err),
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
None
}
}
}
    /// TODO: make this return `T`, where `T` is produced by a function (given
    /// as an argument) from the body of the reply.
#[allow(clippy::too_many_arguments)]
async fn execute_update(
agent: &Agent,
tx: Sender<CallResult>,
time_origin: Instant,
plan: &Plan,
method: String,
arg: Vec<u8>,
n: usize,
) -> bool {
let nonce = plan.nonce.clone();
let deadline = Instant::now() + agent.ingress_timeout;
let mut backoff = get_backoff_policy();
let (request, request_id) = agent
.prepare_update_raw(
&plan.canister_id,
method,
arg,
format!("inc {} {}", nonce, n).into_bytes(),
current_time_and_expiry_time().1,
)
.unwrap();
debug!(
"Sending signed update. request id: {}. Message\n{:?}",
request_id, request
);
let content = SignedRequestBytes::try_from(request).unwrap().into();
let path = update_path(plan.canister_id);
let time_start = std::time::Instant::now();
debug!(
"Sending update() call ({}) after {}ms since origin",
n,
Instant::now().duration_since(time_origin).as_millis()
);
// TODO: for now, this uses MAX_WAIT_INGRESS as timeout for sending the request.
// This should be adjusted to a more appropriate value for posting update
// requests.
UPDATE_SENT.inc();
let res = agent
.http_client()
.send_post_request(
agent.url.join(path.as_str()).unwrap().as_str(),
content,
tokio::time::Instant::from_std(deadline),
)
.await;
match res {
Err(e) => {
let err_msg = format!("[{:?}]: Update send failed{:?}", request_id, e);
UPDATE_SENT_REPLY
.with_label_values(&["update_send_failed"])
.inc();
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(0),
                    11, // sentinel status code: the submit itself failed
Instant::now().duration_since(time_start),
false,
plan.request_type,
),
counter: None,
call_failure: CallFailure::OnSubmit,
err_msg: Some(err_msg),
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
false
}
Ok((body, status)) => {
debug!(
"update() request success ({}): {:?}, {}ms after origin",
n,
status,
Instant::now().duration_since(time_start).as_millis()
);
UPDATE_SENT_REPLY
.with_label_values(&[&format!("{:?}", status)])
.inc();
let update_status_code = status.as_u16();
if update_status_code != 202 {
let err_msg = format!(
"[{:?}]: Update returned non-202: {}",
request_id, update_status_code
);
UPDATE_SENT_REPLY
.with_label_values(&["update_send_failed"])
.inc();
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(0),
update_status_code,
Instant::now().duration_since(time_start),
false,
plan.request_type,
),
counter: None,
call_failure: CallFailure::OnSubmit,
err_msg: Some(err_msg),
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
return false;
}
let mut finished = false;
// Check request status for the first time after 2s (~ time between blocks)
let mut next_poll_time = Instant::now() + Duration::from_secs(2);
while !finished && next_poll_time < deadline {
tokio::time::sleep_until(tokio::time::Instant::from_std(next_poll_time)).await;
next_poll_time = Instant::now() + backoff.next_backoff().unwrap();
let wait = Engine::wait_ingress_for_counter_canister(
agent,
request_id.clone(),
&plan.canister_id,
deadline,
)
.await;
match wait {
Ok((result, counter)) => {
UPDATE_WAIT_REPLY
.with_label_values(&[&format!("{:?}", result)])
.inc();
match result.as_ref() {
"replied" => {
let counter = counter.expect("Did not receive counter value");
let http_status = status.as_u16();
debug!(
"🚀 Got return code ({}): {} - {} after since start {}ms since origin {}ms",
n,
result,
counter,
Instant::now().duration_since(time_start).as_millis(),
Instant::now().duration_since(time_origin).as_millis()
);
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(body.len() as u64),
http_status,
Instant::now().duration_since(time_start),
true,
plan.request_type,
),
counter: Some(counter),
call_failure: CallFailure::None,
err_msg: None,
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
finished = true;
}
"unknown" | "processing" | "received" => {
debug!(
"Received ({}) {} ingress status after {}ms since origin",
n,
result,
Instant::now().duration_since(time_origin).as_millis()
);
}
_ => {
                            let err_msg = format!(
                                "[{:?}]: Update failed: other status {:?}",
                                request_id, result
                            );
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(body.len() as u64),
                                    33, // sentinel status code: unexpected ingress status
Instant::now().duration_since(time_start),
false,
plan.request_type,
),
counter: None,
call_failure: CallFailure::OnWait,
err_msg: Some(err_msg),
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
finished = true;
}
}
}
Err(e) => {
eprintln!("[{:?}]: Update poll failed: {:?}", request_id, e);
UPDATE_WAIT_REPLY
.with_label_values(&[&format!("{:?}", e)])
.inc();
}
}
}
if !finished {
let err_msg = format!("[{:?}]: Update did not finish in time", request_id);
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(0),
                        44, // sentinel status code: update timed out
Instant::now().duration_since(time_start),
false,
plan.request_type,
),
counter: None,
call_failure: CallFailure::OnWait,
err_msg: Some(err_msg),
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
}
finished
}
}
}
async fn check_query(
resp: Option<Vec<u8>>,
tx: Sender<CallResult>,
time_query_start: Instant,
plan: &Plan,
) -> Option<u32> {
debug!("Response: {:?}", resp);
let counter = resp
.as_ref()
.map(|r| Engine::interpret_counter_canister_response(r));
debug!("🚀 Got counter value: {:?}", counter);
let latency = Instant::now().duration_since(time_query_start);
LATENCY_HISTOGRAM
.with_label_values(&["query", "replied"])
.observe(latency.as_secs() as f64 + latency.subsec_nanos() as f64 * 1e-9);
tx.send(CallResult {
fact: Fact::record(
ContentLength::new(resp.unwrap_or_else(Vec::new).len() as u64),
200_u16,
latency,
true,
plan.request_type,
),
counter,
call_failure: CallFailure::None,
err_msg: None,
})
.await
.unwrap_or_else(|_| {
panic!("Sending a fact failed.");
});
counter
}
async fn evaluate_requests(
mut rx: Receiver<CallResult>,
collector: std::sync::mpsc::Sender<Message<Fact>>,
rps: Option<usize>,
_time_start: Instant,
) {
let mut max_counter = 0;
let mut failures = HashMap::new();
while let Some(result) = rx.recv().await {
collector
.send(Message::Body(result.fact))
.expect("Failed to collect facts for rps/update() calls");
if let Some(err_msg) = result.err_msg {
eprintln!("{}", err_msg);
}
// Increment counter for failures
let stat = failures.entry(result.call_failure).or_insert(0);
*stat += 1;
if let Some(counter) = result.counter {
max_counter = std::cmp::max(max_counter, counter);
crate::metrics::COUNTER_VALUE.inc();
}
}
collector
.send(Message::Log(format!(
"requested: {} - 🚀 Max counter value seen: {} - submit failures: {} - wait failures: {}",
rps.unwrap_or(0),
max_counter,
failures.get(&CallFailure::OnSubmit).unwrap_or(&0),
failures.get(&CallFailure::OnWait).unwrap_or(&0),
)))
.unwrap();
collector.send(Message::Eof).unwrap();
}
/// Given the raw bytes of the "arg" counter canister response (NOT the
/// top-level response), returns the corresponding counter value.
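    /// E.g. the reply bytes `[42, 0, 0, 0]` decode to the counter value `42`
    /// (little-endian `u32` in the first four bytes); replies shorter than
    /// four bytes decode to `0`.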
fn interpret_counter_canister_response(bytes: &[u8]) -> u32 {
if bytes.len() >= 4 {
let first_four_bytes: &[u8] = &bytes[0..4];
let mut bytes_as_num = [0; 4];
bytes_as_num.copy_from_slice(first_four_bytes);
u32::from_le_bytes(bytes_as_num)
} else {
0
}
}
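    /// Polls the ingress status of `request_id` until `deadline`; on a reply,
    /// also decodes the counter value from the reply bytes (and, if the
    /// `RESULT_FILE` env var is set, dumps the raw reply to that file).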
async fn wait_ingress_for_counter_canister(
agent: &Agent,
request_id: MessageId,
canister_id: &CanisterId,
deadline: Instant,
) -> Result<(String, Option<u32>), String> {
let call_response = agent
.wait_ingress(request_id, deadline, canister_id)
.await?;
if let Ok(f) = env::var("RESULT_FILE") {
let bytes: Vec<u8> = call_response.reply.clone().unwrap_or_default();
eprintln!("Writing results file: {}", &f);
fs::write(f, bytes).unwrap();
}
let counter_value = call_response
.reply
.as_ref()
.map(|bytes| Engine::interpret_counter_canister_response(bytes));
Ok((call_response.status, counter_value))
}
}
// Manages the number of outstanding requests in flight
#[derive(Clone)]
struct RequestManager {
// Number of requests to start with
initial_requests: usize,
// The semaphore to acquire/release permits
permits: Arc<Semaphore>,
allocs: Arc<AtomicUsize>,
success: Arc<AtomicUsize>,
errors: Arc<AtomicUsize>,
}
impl RequestManager {
fn new(initial_requests: usize) -> Self {
Self {
initial_requests,
permits: Arc::new(Semaphore::new(initial_requests)),
allocs: Arc::new(AtomicUsize::new(0)),
success: Arc::new(AtomicUsize::new(0)),
errors: Arc::new(AtomicUsize::new(0)),
}
}
// Waits for one permit to become available
async fn alloc_permit(&self) -> OwnedSemaphorePermit {
self.allocs.fetch_add(1, Ordering::SeqCst);
self.permits.clone().acquire_owned().await.unwrap()
}
// Called on request completion.
// If the request was successful, the token is returned to the
// free pool so that more requests can be issued in its place.
// If the request failed, the token is dropped without returning
// to the pool. This dynamically adjusts the pool size/max
// outstanding requests.
fn free_permit(&self, permit: OwnedSemaphorePermit, request_success: bool) {
if !request_success {
// TODO: keep a min threshold, look at the specific
// HTTP status code
permit.forget();
self.errors.fetch_add(1, Ordering::SeqCst);
} else {
self.success.fetch_add(1, Ordering::SeqCst);
}
}
fn show(&self) {
let allocs = self.allocs.load(Ordering::SeqCst);
let success = self.success.load(Ordering::SeqCst);
let errors = self.errors.load(Ordering::SeqCst);
println!(
"RequestManager: initial_capacity = {}, allocs = {}, success = {}, errors = {},\
current capacity = {}, free permits = {}",
self.initial_requests,
allocs,
success,
errors,
self.initial_requests - errors,
self.permits.available_permits()
);
}
}
| 35.845283 | 115 | 0.478296 |
d9ccd5ee8c46ec2c6426eadb951b7206d1bd601b | 445 | // Copyright 2021 A Veenstra.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0> or the
// MIT license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. This file may not be copied,
// modified, or distributed except according to those terms.
fn main() -> Result<(), String> {
println!("Parameter count: {:?}", sut::get_parameter_count()?);
Ok(())
}
| 40.454545 | 120 | 0.696629 |
169ca28364c960c40b696e7740c4a152626ba77d | 4,280 | use nu_engine::CallExt;
use nu_protocol::{
ast::Call,
engine::{Command, EngineState, Stack},
Category, Example, PipelineData, ShellError, Signature, Spanned, SyntaxShape, Type,
};
use super::super::values::NuDataFrame;
#[derive(Clone)]
pub struct SampleDF;
impl Command for SampleDF {
fn name(&self) -> &str {
"sample"
}
fn usage(&self) -> &str {
"Create sample dataframe"
}
fn signature(&self) -> Signature {
Signature::build(self.name())
.named(
"n-rows",
SyntaxShape::Int,
"number of rows to be taken from dataframe",
Some('n'),
)
.named(
"fraction",
SyntaxShape::Number,
"fraction of dataframe to be taken",
Some('f'),
)
.named(
"seed",
SyntaxShape::Number,
"seed for the selection",
Some('s'),
)
.switch("replace", "sample with replace", Some('e'))
.switch("shuffle", "shuffle sample", Some('u'))
.category(Category::Custom("dataframe".into()))
}
fn examples(&self) -> Vec<Example> {
vec![
Example {
description: "Sample rows from dataframe",
example: "[[a b]; [1 2] [3 4]] | into df | sample -n 1",
result: None, // No expected value because sampling is random
},
Example {
description: "Shows sample row using fraction and replace",
example: "[[a b]; [1 2] [3 4] [5 6]] | into df | sample -f 0.5 -e",
result: None, // No expected value because sampling is random
},
]
}
fn input_type(&self) -> Type {
Type::Custom("dataframe".into())
}
fn output_type(&self) -> Type {
Type::Custom("dataframe".into())
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
command(engine_state, stack, call, input)
}
}
fn command(
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let rows: Option<Spanned<usize>> = call.get_flag(engine_state, stack, "n-rows")?;
let fraction: Option<Spanned<f64>> = call.get_flag(engine_state, stack, "fraction")?;
let seed: Option<u64> = call
.get_flag::<i64>(engine_state, stack, "seed")?
.map(|val| val as u64);
let replace: bool = call.has_flag("replace");
let shuffle: bool = call.has_flag("shuffle");
let df = NuDataFrame::try_from_pipeline(input, call.head)?;
match (rows, fraction) {
(Some(rows), None) => df
.as_ref()
.sample_n(rows.item, replace, shuffle, seed)
.map_err(|e| {
ShellError::GenericError(
"Error creating sample".into(),
e.to_string(),
Some(rows.span),
None,
Vec::new(),
)
}),
(None, Some(frac)) => df
.as_ref()
.sample_frac(frac.item, replace, shuffle, seed)
.map_err(|e| {
ShellError::GenericError(
"Error creating sample".into(),
e.to_string(),
Some(frac.span),
None,
Vec::new(),
)
}),
(Some(_), Some(_)) => Err(ShellError::GenericError(
"Incompatible flags".into(),
"Only one selection criterion allowed".into(),
Some(call.head),
None,
Vec::new(),
)),
(None, None) => Err(ShellError::GenericError(
"No selection".into(),
"No selection criterion was found".into(),
Some(call.head),
Some("Perhaps you want to use the flag -n or -f".into()),
Vec::new(),
)),
}
.map(|df| PipelineData::Value(NuDataFrame::dataframe_into_value(df, call.head), None))
}
| 30.791367 | 90 | 0.491355 |
f98631e36d49d602a6657a4c68e2365a817aa2f3 | 5,428 | #![allow(dead_code)]
#![allow(non_upper_case_globals)]
#![allow(unused_parens)]
#![feature(asm)]
#![feature(concat_idents)]
extern crate event;
extern crate netutils;
extern crate syscall;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::{Read, Write, Result};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::sync::Arc;
use event::EventQueue;
use syscall::{Packet, SchemeMut, PHYSMAP_NO_CACHE, PHYSMAP_WRITE};
use syscall::error::EWOULDBLOCK;
pub mod device;
fn main() {
let mut args = env::args().skip(1);
let mut name = args.next().expect("alxd: no name provided");
name.push_str("_alx");
let bar_str = args.next().expect("alxd: no address provided");
let bar = usize::from_str_radix(&bar_str, 16).expect("alxd: failed to parse address");
let irq_str = args.next().expect("alxd: no irq provided");
let irq = irq_str.parse::<u8>().expect("alxd: failed to parse irq");
print!("{}", format!(" + ALX {} on: {:X}, IRQ: {}\n", name, bar, irq));
// Daemonize
if unsafe { syscall::clone(0).unwrap() } == 0
{
let socket_fd = syscall::open(":network", syscall::O_RDWR | syscall::O_CREAT | syscall::O_NONBLOCK).expect("alxd: failed to create network scheme");
let socket = Arc::new(RefCell::new(unsafe { File::from_raw_fd(socket_fd) }));
let mut irq_file = File::open(format!("irq:{}", irq)).expect("alxd: failed to open IRQ file");
let address = unsafe { syscall::physmap(bar, 128*1024, PHYSMAP_WRITE | PHYSMAP_NO_CACHE).expect("alxd: failed to map address") };
{
let device = Arc::new(RefCell::new(unsafe { device::Alx::new(address).expect("alxd: failed to allocate device") }));
let mut event_queue = EventQueue::<usize>::new().expect("alxd: failed to create event queue");
syscall::setrens(0, 0).expect("alxd: failed to enter null namespace");
let todo = Arc::new(RefCell::new(Vec::<Packet>::new()));
let device_irq = device.clone();
let socket_irq = socket.clone();
let todo_irq = todo.clone();
event_queue.add(irq_file.as_raw_fd(), move |_event| -> Result<Option<usize>> {
let mut irq = [0; 8];
irq_file.read(&mut irq)?;
if unsafe { device_irq.borrow_mut().intr_legacy() } {
irq_file.write(&mut irq)?;
let mut todo = todo_irq.borrow_mut();
let mut i = 0;
while i < todo.len() {
let a = todo[i].a;
device_irq.borrow_mut().handle(&mut todo[i]);
if todo[i].a == (-EWOULDBLOCK) as usize {
todo[i].a = a;
i += 1;
} else {
socket_irq.borrow_mut().write(&mut todo[i])?;
todo.remove(i);
}
}
let next_read = device_irq.borrow().next_read();
if next_read > 0 {
return Ok(Some(next_read));
}
}
Ok(None)
}).expect("alxd: failed to catch events on IRQ file");
let socket_packet = socket.clone();
event_queue.add(socket_fd, move |_event| -> Result<Option<usize>> {
loop {
let mut packet = Packet::default();
if socket_packet.borrow_mut().read(&mut packet)? == 0 {
break;
}
let a = packet.a;
device.borrow_mut().handle(&mut packet);
if packet.a == (-EWOULDBLOCK) as usize {
packet.a = a;
todo.borrow_mut().push(packet);
} else {
socket_packet.borrow_mut().write(&mut packet)?;
}
}
let next_read = device.borrow().next_read();
if next_read > 0 {
return Ok(Some(next_read));
}
Ok(None)
}).expect("alxd: failed to catch events on IRQ file");
for event_count in event_queue.trigger_all(event::Event {
fd: 0,
flags: 0,
}).expect("alxd: failed to trigger events") {
socket.borrow_mut().write(&Packet {
id: 0,
pid: 0,
uid: 0,
gid: 0,
a: syscall::number::SYS_FEVENT,
b: 0,
c: syscall::flag::EVENT_READ,
d: event_count
}).expect("alxd: failed to write event");
}
loop {
let event_count = event_queue.run().expect("alxd: failed to handle events");
socket.borrow_mut().write(&Packet {
id: 0,
pid: 0,
uid: 0,
gid: 0,
a: syscall::number::SYS_FEVENT,
b: 0,
c: syscall::flag::EVENT_READ,
d: event_count
}).expect("alxd: failed to write event");
}
}
unsafe { let _ = syscall::physunmap(address); }
}
}
| 36.92517 | 156 | 0.482314 |