hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
161a0220e639480d417db51eaa4da809214c8546 | 7,610 | // If `src` can be promoted to `$dst`, then it must be Ok to cast `dst` back to
// `$src`
macro_rules! promote_and_back {
($($src:ident => $($dst:ident),+);+;) => {
mod demoting_to {
$(
mod $src {
mod from {
use crate::From;
$(
quickcheck! {
fn $dst(src: $src) -> bool {
$src::cast($dst::cast(src)).is_ok()
}
}
)+
}
}
)+
}
}
}
#[cfg(target_pointer_width = "32")]
promote_and_back! {
i8 => f32, f64, i16, i32, isize, i64, i128 ;
i16 => f32, f64, i32, isize, i64, i128 ;
i32 => f64, i64, i128 ;
isize => f64, i64, i128 ;
i64 => i128 ;
u8 => f32, f64, i16, i32, isize, i64, i128, u16, u32, usize, u64, u128;
u16 => f32, f64, i32, isize, i64, i128, u32, usize, u64, u128;
u32 => f64, i64, i128, u64, u128;
usize => f64, i64, i128, u64, u128;
u64 => i128, u128;
}
#[cfg(target_pointer_width = "64")]
promote_and_back! {
i8 => f32, f64, i16, i32, i64, isize, i128 ;
i16 => f32, f64, i32, i64, isize, i128 ;
i32 => f64, i64, isize, i128 ;
i64 => i128 ;
isize => i128 ;
u8 => f32, f64, i16, i32, i64, isize, i128, u16, u32, u64, usize, u128;
u16 => f32, f64, i32, i64, isize, i128, u32, u64, usize, u128;
u32 => f64, i64, isize, i128, u64, usize, u128;
u64 => i128, u128;
usize => i128, u128;
}
// If it's Ok to cast `src` to `$dst`, it must also be Ok to cast `dst` back to
// `$src`
macro_rules! symmetric_cast_between {
($($src:ident => $($dst:ident),+);+;) => {
mod symmetric_cast_between {
$(
mod $src {
mod and {
use quickcheck::TestResult;
use crate::From;
$(
quickcheck! {
fn $dst(src: $src) -> TestResult {
if let Ok(dst) = $dst::cast(src) {
TestResult::from_bool(
$src::cast(dst).is_ok())
} else {
TestResult::discard()
}
}
}
)+
}
}
)+
}
}
}
#[cfg(target_pointer_width = "32")]
symmetric_cast_between! {
u8 => i8 ;
u16 => i8, i16 ;
u32 => i8, i16, i32 ;
usize => i8, i16, i32 ;
u64 => i8, i16, i32, i64, isize;
}
#[cfg(target_pointer_width = "64")]
symmetric_cast_between! {
u8 => i8 ;
u16 => i8, i16 ;
u32 => i8, i16, i32 ;
u64 => i8, i16, i32, i64, isize ;
usize => i8, i16, i32, i64, isize ;
u128 => i8, i16, i32, i64, isize, i128;
}
macro_rules! from_float {
($($src:ident => $($dst:ident),+);+;) => {
$(
mod $src {
mod inf {
mod to {
use crate::{Error, From};
$(
#[test]
fn $dst() {
let _0: $src = 0.;
let _1: $src = 1.;
let inf = _1 / _0;
let neg_inf = -_1 / _0;
assert_eq!($dst::cast(inf),
Err(Error::Infinite));
assert_eq!($dst::cast(neg_inf),
Err(Error::Infinite));
}
)+
}
}
mod nan {
mod to {
use crate::{Error, From};
$(
#[test]
fn $dst() {
let _0: $src = 0.;
let nan = _0 / _0;
assert_eq!($dst::cast(nan),
Err(Error::NaN));
}
)+
}
}
}
)+
}
}
from_float! {
f32 => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize;
f64 => i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize;
}
#[test]
fn test_fl_conversion() {
use crate::u128;
assert_eq!(u128(42.0f32), Ok(42));
}
#[test]
fn gh16() {
assert_eq!(super::u64(-0.01_f64), Ok(0));
assert_eq!(super::u64(-0.99_f32), Ok(0));
assert_eq!(super::u32(-0.99_f64), Ok(0));
assert_eq!(super::u32(-0.01_f32), Ok(0));
assert_eq!(super::u64(0.01_f64), Ok(0));
assert_eq!(super::u64(0.99_f32), Ok(0));
assert_eq!(super::u32(0.99_f64), Ok(0));
assert_eq!(super::u32(0.01_f32), Ok(0));
}
#[test]
fn gh15() {
assert_eq!(super::u32(32_f32.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u32(32_f64.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u64(64_f32.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u64(64_f64.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u8(8_f32.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u8(8_f64.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u16(16_f32.exp2()), Err(super::Error::Overflow));
assert_eq!(super::u16(16_f64.exp2()), Err(super::Error::Overflow));
}
#[test]
fn gh23_lossless_integer_max_min_to_float() {
// f32::MANTISSA_DIGITS = 24
assert_eq!(Ok(u8::MAX), super::u8(255f32));
assert_eq!(Ok(u16::MAX), super::u16(65_535f32));
// f64::MANTISSA_DIGITS = 53
assert_eq!(Ok(u8::MAX), super::u8(255f64));
assert_eq!(Ok(u16::MAX), super::u16(65_535f64));
assert_eq!(Ok(u32::MAX), super::u32(4_294_967_295f64));
// also check negative values (not part of the original bug)
assert_eq!(Ok(i8::MIN), super::i8(-128f32));
assert_eq!(Ok(i16::MIN), super::i16(-32_768f32));
assert_eq!(Ok(i8::MIN), super::i8(-128f64));
assert_eq!(Ok(i16::MIN), super::i16(-32_768f64));
assert_eq!(Ok(i32::MIN), super::i32(-2_147_483_648f64));
}
| 36.411483 | 82 | 0.360184 |
288b36b3512fea892bfd125cccba0d31ea23bd87 | 31,788 | #![cfg_attr(not(feature = "std"), no_std)]
use ink_lang as ink;
#[ink::contract]
mod erc20 {
use ink_storage::{
lazy::{
Lazy,
Mapping,
},
traits::SpreadAllocate,
};
/// A simple ERC-20 contract.
#[ink(storage)]
#[derive(SpreadAllocate)]
pub struct Erc20 {
/// Total token supply.
total_supply: Lazy<Balance>,
/// Mapping from owner to number of owned token.
balances: Mapping<AccountId, Balance>,
/// Mapping of the token amount which an account is allowed to withdraw
/// from another account.
allowances: Mapping<(AccountId, AccountId), Balance>,
}
/// Event emitted when a token transfer occurs.
#[ink(event)]
pub struct Transfer {
#[ink(topic)]
from: Option<AccountId>,
#[ink(topic)]
to: Option<AccountId>,
value: Balance,
}
/// Event emitted when an approval occurs that `spender` is allowed to withdraw
/// up to the amount of `value` tokens from `owner`.
#[ink(event)]
pub struct Approval {
#[ink(topic)]
owner: AccountId,
#[ink(topic)]
spender: AccountId,
value: Balance,
}
/// The ERC-20 error types.
#[derive(Debug, PartialEq, Eq, scale::Encode, scale::Decode)]
#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))]
pub enum Error {
/// Returned if not enough balance to fulfill a request is available.
InsufficientBalance,
/// Returned if not enough allowance to fulfill a request is available.
InsufficientAllowance,
}
/// The ERC-20 result type.
pub type Result<T> = core::result::Result<T, Error>;
impl Erc20 {
/// Creates a new ERC-20 contract with the specified initial supply.
#[ink(constructor)]
pub fn new(initial_supply: Balance) -> Self {
ink_lang::codegen::initialize_contract(|contract| {
Self::new_init(contract, initial_supply)
})
}
/// Default initializes the ERC-20 contract with the specified initial supply.
fn new_init(&mut self, initial_supply: Balance) {
let caller = Self::env().caller();
self.balances.insert(&caller, &initial_supply);
Lazy::set(&mut self.total_supply, initial_supply);
Self::env().emit_event(Transfer {
from: None,
to: Some(caller),
value: initial_supply,
});
}
/// Returns the total token supply.
#[ink(message)]
pub fn total_supply(&self) -> Balance {
*self.total_supply
}
/// Returns the account balance for the specified `owner`.
///
/// Returns `0` if the account is non-existent.
#[ink(message)]
pub fn balance_of(&self, owner: AccountId) -> Balance {
self.balance_of_impl(&owner)
}
/// Returns the account balance for the specified `owner`.
///
/// Returns `0` if the account is non-existent.
///
/// # Note
///
/// Prefer to call this method over `balance_of` since this
/// works using references which are more efficient in Wasm.
#[inline]
fn balance_of_impl(&self, owner: &AccountId) -> Balance {
self.balances.get(owner).unwrap_or_default()
}
/// Returns the amount which `spender` is still allowed to withdraw from `owner`.
///
/// Returns `0` if no allowance has been set.
#[ink(message)]
pub fn allowance(&self, owner: AccountId, spender: AccountId) -> Balance {
self.allowance_impl(&owner, &spender)
}
/// Returns the amount which `spender` is still allowed to withdraw from `owner`.
///
/// Returns `0` if no allowance has been set.
///
/// # Note
///
/// Prefer to call this method over `allowance` since this
/// works using references which are more efficient in Wasm.
#[inline]
fn allowance_impl(&self, owner: &AccountId, spender: &AccountId) -> Balance {
self.allowances.get((owner, spender)).unwrap_or_default()
}
/// Transfers `value` amount of tokens from the caller's account to account `to`.
///
/// On success a `Transfer` event is emitted.
///
/// # Errors
///
/// Returns `InsufficientBalance` error if there are not enough tokens on
/// the caller's account balance.
#[ink(message)]
pub fn transfer(&mut self, to: AccountId, value: Balance) -> Result<()> {
let from = self.env().caller();
self.transfer_from_to(&from, &to, value)
}
/// Allows `spender` to withdraw from the caller's account multiple times, up to
/// the `value` amount.
///
/// If this function is called again it overwrites the current allowance with `value`.
///
/// An `Approval` event is emitted.
#[ink(message)]
pub fn approve(&mut self, spender: AccountId, value: Balance) -> Result<()> {
let owner = self.env().caller();
self.allowances.insert((&owner, &spender), &value);
self.env().emit_event(Approval {
owner,
spender,
value,
});
Ok(())
}
/// Transfers `value` tokens on the behalf of `from` to the account `to`.
///
/// This can be used to allow a contract to transfer tokens on ones behalf and/or
/// to charge fees in sub-currencies, for example.
///
/// On success a `Transfer` event is emitted.
///
/// # Errors
///
/// Returns `InsufficientAllowance` error if there are not enough tokens allowed
/// for the caller to withdraw from `from`.
///
/// Returns `InsufficientBalance` error if there are not enough tokens on
/// the account balance of `from`.
#[ink(message)]
pub fn transfer_from(
&mut self,
from: AccountId,
to: AccountId,
value: Balance,
) -> Result<()> {
let caller = self.env().caller();
let allowance = self.allowance_impl(&from, &caller);
if allowance < value {
return Err(Error::InsufficientAllowance)
}
self.transfer_from_to(&from, &to, value)?;
self.allowances
.insert((&from, &caller), &(allowance - value));
Ok(())
}
/// Transfers `value` amount of tokens from the caller's account to account `to`.
///
/// On success a `Transfer` event is emitted.
///
/// # Errors
///
/// Returns `InsufficientBalance` error if there are not enough tokens on
/// the caller's account balance.
fn transfer_from_to(
&mut self,
from: &AccountId,
to: &AccountId,
value: Balance,
) -> Result<()> {
let from_balance = self.balance_of_impl(from);
if from_balance < value {
return Err(Error::InsufficientBalance)
}
self.balances.insert(from, &(from_balance - value));
let to_balance = self.balance_of_impl(to);
self.balances.insert(to, &(to_balance + value));
self.env().emit_event(Transfer {
from: Some(*from),
to: Some(*to),
value,
});
Ok(())
}
}
/// Unit tests.
#[cfg(not(feature = "ink-experimental-engine"))]
#[cfg(test)]
mod tests {
/// Imports all the definitions from the outer scope so we can use them here.
use super::*;
type Event = <Erc20 as ::ink_lang::reflect::ContractEventBase>::Type;
use ink_lang as ink;
fn assert_transfer_event(
event: &ink_env::test::EmittedEvent,
expected_from: Option<AccountId>,
expected_to: Option<AccountId>,
expected_value: Balance,
) {
let decoded_event = <Event as scale::Decode>::decode(&mut &event.data[..])
.expect("encountered invalid contract event data buffer");
if let Event::Transfer(Transfer { from, to, value }) = decoded_event {
assert_eq!(from, expected_from, "encountered invalid Transfer.from");
assert_eq!(to, expected_to, "encountered invalid Transfer.to");
assert_eq!(value, expected_value, "encountered invalid Trasfer.value");
} else {
panic!("encountered unexpected event kind: expected a Transfer event")
}
let expected_topics = vec![
encoded_into_hash(&PrefixedValue {
value: b"Erc20::Transfer",
prefix: b"",
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::from",
value: &expected_from,
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::to",
value: &expected_to,
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::value",
value: &expected_value,
}),
];
for (n, (actual_topic, expected_topic)) in
event.topics.iter().zip(expected_topics).enumerate()
{
let topic = actual_topic
.decode::<Hash>()
.expect("encountered invalid topic encoding");
assert_eq!(topic, expected_topic, "encountered invalid topic at {}", n);
}
}
/// The default constructor does its job.
#[ink::test]
fn new_works() {
// Constructor works.
let _erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(1, emitted_events.len());
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
}
/// The total supply was applied.
#[ink::test]
fn total_supply_works() {
// Constructor works.
let erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// Get the token total supply.
assert_eq!(erc20.total_supply(), 100);
}
/// Get the actual balance of an account.
#[ink::test]
fn balance_of_works() {
// Constructor works
let erc20 = Erc20::new(100);
// Transfer event triggered during initial construction
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
.expect("Cannot get accounts");
// Alice owns all the tokens on contract instantiation
assert_eq!(erc20.balance_of(accounts.alice), 100);
// Bob does not owns tokens
assert_eq!(erc20.balance_of(accounts.bob), 0);
}
#[ink::test]
fn transfer_works() {
// Constructor works.
let mut erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
.expect("Cannot get accounts");
assert_eq!(erc20.balance_of(accounts.bob), 0);
// Alice transfers 10 tokens to Bob.
assert_eq!(erc20.transfer(accounts.bob, 10), Ok(()));
// Bob owns 10 tokens.
assert_eq!(erc20.balance_of(accounts.bob), 10);
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 2);
// Check first transfer event related to ERC-20 instantiation.
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// Check the second transfer event relating to the actual trasfer.
assert_transfer_event(
&emitted_events[1],
Some(AccountId::from([0x01; 32])),
Some(AccountId::from([0x02; 32])),
10,
);
}
#[ink::test]
fn invalid_transfer_should_fail() {
// Constructor works.
let mut erc20 = Erc20::new(100);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
.expect("Cannot get accounts");
assert_eq!(erc20.balance_of(accounts.bob), 0);
// Get contract address.
let callee = ink_env::account_id::<ink_env::DefaultEnvironment>();
// Create call
let mut data =
ink_env::test::CallData::new(ink_env::call::Selector::new([0x00; 4])); // balance_of
data.push_arg(&accounts.bob);
// Push the new execution context to set Bob as caller
ink_env::test::push_execution_context::<ink_env::DefaultEnvironment>(
accounts.bob,
callee,
1000000,
1000000,
data,
);
// Bob fails to transfers 10 tokens to Eve.
assert_eq!(
erc20.transfer(accounts.eve, 10),
Err(Error::InsufficientBalance)
);
// Alice owns all the tokens.
assert_eq!(erc20.balance_of(accounts.alice), 100);
assert_eq!(erc20.balance_of(accounts.bob), 0);
assert_eq!(erc20.balance_of(accounts.eve), 0);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 1);
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
}
#[ink::test]
fn transfer_from_works() {
// Constructor works.
let mut erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
.expect("Cannot get accounts");
// Bob fails to transfer tokens owned by Alice.
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, 10),
Err(Error::InsufficientAllowance)
);
// Alice approves Bob for token transfers on her behalf.
assert_eq!(erc20.approve(accounts.bob, 10), Ok(()));
// The approve event takes place.
assert_eq!(ink_env::test::recorded_events().count(), 2);
// Get contract address.
let callee = ink_env::account_id::<ink_env::DefaultEnvironment>();
// Create call.
let mut data =
ink_env::test::CallData::new(ink_env::call::Selector::new([0x00; 4])); // balance_of
data.push_arg(&accounts.bob);
// Push the new execution context to set Bob as caller.
ink_env::test::push_execution_context::<ink_env::DefaultEnvironment>(
accounts.bob,
callee,
1000000,
1000000,
data,
);
// Bob transfers tokens from Alice to Eve.
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, 10),
Ok(())
);
// Eve owns tokens.
assert_eq!(erc20.balance_of(accounts.eve), 10);
// Check all transfer events that happened during the previous calls:
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 3);
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// The second event `emitted_events[1]` is an Approve event that we skip checking.
assert_transfer_event(
&emitted_events[2],
Some(AccountId::from([0x01; 32])),
Some(AccountId::from([0x05; 32])),
10,
);
}
#[ink::test]
fn allowance_must_not_change_on_failed_transfer() {
let mut erc20 = Erc20::new(100);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>()
.expect("Cannot get accounts");
// Alice approves Bob for token transfers on her behalf.
let alice_balance = erc20.balance_of(accounts.alice);
let initial_allowance = alice_balance + 2;
assert_eq!(erc20.approve(accounts.bob, initial_allowance), Ok(()));
// Get contract address.
let callee = ink_env::account_id::<ink_env::DefaultEnvironment>();
// Create call.
let mut data =
ink_env::test::CallData::new(ink_env::call::Selector::new([0x00; 4])); // balance_of
data.push_arg(&accounts.bob);
// Push the new execution context to set Bob as caller.
ink_env::test::push_execution_context::<ink_env::DefaultEnvironment>(
accounts.bob,
callee,
1000000,
1000000,
data,
);
// Bob tries to transfer tokens from Alice to Eve.
let emitted_events_before = ink_env::test::recorded_events();
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, alice_balance + 1),
Err(Error::InsufficientBalance)
);
// Allowance must have stayed the same
assert_eq!(
erc20.allowance(accounts.alice, accounts.bob),
initial_allowance
);
// No more events must have been emitted
let emitted_events_after = ink_env::test::recorded_events();
assert_eq!(emitted_events_before.count(), emitted_events_after.count());
}
}
/// For calculating the event topic hash.
struct PrefixedValue<'a, 'b, T> {
pub prefix: &'a [u8],
pub value: &'b T,
}
impl<X> scale::Encode for PrefixedValue<'_, '_, X>
where
X: scale::Encode,
{
#[inline]
fn size_hint(&self) -> usize {
self.prefix.size_hint() + self.value.size_hint()
}
#[inline]
fn encode_to<T: scale::Output + ?Sized>(&self, dest: &mut T) {
self.prefix.encode_to(dest);
self.value.encode_to(dest);
}
}
#[cfg(feature = "ink-experimental-engine")]
#[cfg(test)]
mod tests_experimental_engine {
use super::*;
use ink_env::Clear;
use ink_lang as ink;
type Event = <Erc20 as ::ink_lang::reflect::ContractEventBase>::Type;
fn assert_transfer_event(
event: &ink_env::test::EmittedEvent,
expected_from: Option<AccountId>,
expected_to: Option<AccountId>,
expected_value: Balance,
) {
let decoded_event = <Event as scale::Decode>::decode(&mut &event.data[..])
.expect("encountered invalid contract event data buffer");
if let Event::Transfer(Transfer { from, to, value }) = decoded_event {
assert_eq!(from, expected_from, "encountered invalid Transfer.from");
assert_eq!(to, expected_to, "encountered invalid Transfer.to");
assert_eq!(value, expected_value, "encountered invalid Trasfer.value");
} else {
panic!("encountered unexpected event kind: expected a Transfer event")
}
let expected_topics = vec![
encoded_into_hash(&PrefixedValue {
value: b"Erc20::Transfer",
prefix: b"",
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::from",
value: &expected_from,
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::to",
value: &expected_to,
}),
encoded_into_hash(&PrefixedValue {
prefix: b"Erc20::Transfer::value",
value: &expected_value,
}),
];
let topics = event.topics.clone();
for (n, (actual_topic, expected_topic)) in
topics.iter().zip(expected_topics).enumerate()
{
let mut topic_hash = Hash::clear();
let len = actual_topic.len();
topic_hash.as_mut()[0..len].copy_from_slice(&actual_topic[0..len]);
assert_eq!(
topic_hash, expected_topic,
"encountered invalid topic at {}",
n
);
}
}
/// The default constructor does its job.
#[ink::test]
fn new_works() {
// Constructor works.
let _erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(1, emitted_events.len());
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
}
/// The total supply was applied.
#[ink::test]
fn total_supply_works() {
// Constructor works.
let erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// Get the token total supply.
assert_eq!(erc20.total_supply(), 100);
}
/// Get the actual balance of an account.
#[ink::test]
fn balance_of_works() {
// Constructor works
let erc20 = Erc20::new(100);
// Transfer event triggered during initial construction
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>();
// Alice owns all the tokens on contract instantiation
assert_eq!(erc20.balance_of(accounts.alice), 100);
// Bob does not owns tokens
assert_eq!(erc20.balance_of(accounts.bob), 0);
}
#[ink::test]
fn transfer_works() {
// Constructor works.
let mut erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>();
assert_eq!(erc20.balance_of(accounts.bob), 0);
// Alice transfers 10 tokens to Bob.
assert_eq!(erc20.transfer(accounts.bob, 10), Ok(()));
// Bob owns 10 tokens.
assert_eq!(erc20.balance_of(accounts.bob), 10);
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 2);
// Check first transfer event related to ERC-20 instantiation.
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// Check the second transfer event relating to the actual trasfer.
assert_transfer_event(
&emitted_events[1],
Some(AccountId::from([0x01; 32])),
Some(AccountId::from([0x02; 32])),
10,
);
}
#[ink::test]
fn invalid_transfer_should_fail() {
// Constructor works.
let mut erc20 = Erc20::new(100);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>();
assert_eq!(erc20.balance_of(accounts.bob), 0);
// Set the contract as callee and Bob as caller.
let contract = ink_env::account_id::<ink_env::DefaultEnvironment>();
ink_env::test::set_callee::<ink_env::DefaultEnvironment>(contract);
ink_env::test::set_caller::<ink_env::DefaultEnvironment>(accounts.bob);
// Bob fails to transfers 10 tokens to Eve.
assert_eq!(
erc20.transfer(accounts.eve, 10),
Err(Error::InsufficientBalance)
);
// Alice owns all the tokens.
assert_eq!(erc20.balance_of(accounts.alice), 100);
assert_eq!(erc20.balance_of(accounts.bob), 0);
assert_eq!(erc20.balance_of(accounts.eve), 0);
// Transfer event triggered during initial construction.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 1);
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
}
#[ink::test]
fn transfer_from_works() {
// Constructor works.
let mut erc20 = Erc20::new(100);
// Transfer event triggered during initial construction.
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>();
// Bob fails to transfer tokens owned by Alice.
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, 10),
Err(Error::InsufficientAllowance)
);
// Alice approves Bob for token transfers on her behalf.
assert_eq!(erc20.approve(accounts.bob, 10), Ok(()));
// The approve event takes place.
assert_eq!(ink_env::test::recorded_events().count(), 2);
// Set the contract as callee and Bob as caller.
let contract = ink_env::account_id::<ink_env::DefaultEnvironment>();
ink_env::test::set_callee::<ink_env::DefaultEnvironment>(contract);
ink_env::test::set_caller::<ink_env::DefaultEnvironment>(accounts.bob);
// Bob transfers tokens from Alice to Eve.
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, 10),
Ok(())
);
// Eve owns tokens.
assert_eq!(erc20.balance_of(accounts.eve), 10);
// Check all transfer events that happened during the previous calls:
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events.len(), 3);
assert_transfer_event(
&emitted_events[0],
None,
Some(AccountId::from([0x01; 32])),
100,
);
// The second event `emitted_events[1]` is an Approve event that we skip checking.
assert_transfer_event(
&emitted_events[2],
Some(AccountId::from([0x01; 32])),
Some(AccountId::from([0x05; 32])),
10,
);
}
#[ink::test]
fn allowance_must_not_change_on_failed_transfer() {
let mut erc20 = Erc20::new(100);
let accounts =
ink_env::test::default_accounts::<ink_env::DefaultEnvironment>();
// Alice approves Bob for token transfers on her behalf.
let alice_balance = erc20.balance_of(accounts.alice);
let initial_allowance = alice_balance + 2;
assert_eq!(erc20.approve(accounts.bob, initial_allowance), Ok(()));
// Get contract address.
let callee = ink_env::account_id::<ink_env::DefaultEnvironment>();
ink_env::test::set_callee::<ink_env::DefaultEnvironment>(callee);
ink_env::test::set_caller::<ink_env::DefaultEnvironment>(accounts.bob);
// Bob tries to transfer tokens from Alice to Eve.
let emitted_events_before =
ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(
erc20.transfer_from(accounts.alice, accounts.eve, alice_balance + 1),
Err(Error::InsufficientBalance)
);
// Allowance must have stayed the same
assert_eq!(
erc20.allowance(accounts.alice, accounts.bob),
initial_allowance
);
// No more events must have been emitted
let emitted_events_after =
ink_env::test::recorded_events().collect::<Vec<_>>();
assert_eq!(emitted_events_before.len(), emitted_events_after.len());
}
}
#[cfg(test)]
fn encoded_into_hash<T>(entity: &T) -> Hash
where
T: scale::Encode,
{
use ink_env::{
hash::{
Blake2x256,
CryptoHash,
HashOutput,
},
Clear,
};
let mut result = Hash::clear();
let len_result = result.as_ref().len();
let encoded = entity.encode();
let len_encoded = encoded.len();
if len_encoded <= len_result {
result.as_mut()[..len_encoded].copy_from_slice(&encoded);
return result
}
let mut hash_output = <<Blake2x256 as HashOutput>::Type as Default>::default();
<Blake2x256 as CryptoHash>::hash(&encoded, &mut hash_output);
let copy_len = core::cmp::min(hash_output.len(), len_result);
result.as_mut()[0..copy_len].copy_from_slice(&hash_output[0..copy_len]);
result
}
}
| 37.933174 | 100 | 0.530578 |
5dfd5e6ce172acfeb24311286d189ae0debf8f7b | 15,239 | //! This tests the speed of on_state_change of IngressHandler under the
//! following conditions:
//!
//! - The ingress pool is populated with a given number of ingress messages,
//! both unvalidated and validated.
//! - Some of the validated messages are about to be purged due to expiration.
//! - Some of the validated messages are about to be purged due to execution or
//! finalization.
//! - We use real (or almost real) for dependencies like crypto and
//! IngressHistoryReader.
//! - The changeset is also realistically applied.
//!
//! We vary the rate of unvalidated ingress coming into the unvalidated pool
//! between 100/s and 1000/s, and each message has a 100 bytes payload.
use criterion::{criterion_group, criterion_main, Criterion};
use ic_artifact_pool::ingress_pool::IngressPoolImpl;
use ic_config::artifact_pool::ArtifactPoolConfig;
use ic_constants::MAX_INGRESS_TTL;
use ic_ingress_manager::IngressManager;
use ic_interfaces::{
artifact_pool::UnvalidatedArtifact, ingress_manager::IngressHandler,
ingress_pool::MutableIngressPool, registry::RegistryClient, time_source::TimeSource,
};
use ic_interfaces_state_manager::Labeled;
use ic_logger::{replica_logger::no_op_logger, ReplicaLogger};
use ic_metrics::MetricsRegistry;
use ic_registry_client::client::RegistryClientImpl;
use ic_registry_keys::make_subnet_record_key;
use ic_registry_proto_data_provider::ProtoRegistryDataProvider;
use ic_registry_subnet_type::SubnetType;
use ic_replicated_state::{BitcoinState, CanisterQueues, ReplicatedState, SystemMetadata};
use ic_test_utilities::{
consensus::MockConsensusCache,
crypto::temp_crypto_component_with_fake_registry,
cycles_account_manager::CyclesAccountManagerBuilder,
history::MockIngressHistory,
mock_time,
state::ReplicatedStateBuilder,
state_manager::MockStateManager,
types::ids::{canister_test_id, node_test_id, subnet_test_id, user_test_id},
types::messages::SignedIngressBuilder,
FastForwardTimeSource,
};
use ic_test_utilities_registry::test_subnet_record;
use ic_types::{
ingress::{IngressState, IngressStatus},
malicious_flags::MaliciousFlags,
messages::{MessageId, SignedIngress},
Height, RegistryVersion, SubnetId, Time,
};
use rand::{seq::SliceRandom, Rng};
use std::collections::{BTreeMap, HashSet};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
/// Default payload size is 100 bytes.
const PAYLOAD_SIZE: usize = 100;
/// Max ingress message per payload.
const MAX_INGRESS_COUNT_PER_PAYLOAD: usize = 1000;
/// Block time
const BLOCK_TIME: Duration = Duration::from_secs(2);
type Histories = Arc<RwLock<Vec<Arc<(Time, HashSet<MessageId>)>>>>;
struct SimulatedIngressHistory {
time_source: Arc<dyn TimeSource>,
histories: Histories,
}
impl SimulatedIngressHistory {
fn new(time_source: Arc<dyn TimeSource>) -> (Self, MockIngressHistory) {
let mut ingress_hist_reader = MockIngressHistory::new();
let histories: Histories = Arc::new(RwLock::new(Vec::new()));
let hist = histories.clone();
ingress_hist_reader
.expect_get_latest_status()
.returning(move || {
let set = hist.read().unwrap()[0].clone();
Box::new(move |ingress_id| {
if set.1.contains(ingress_id) {
IngressStatus::Unknown
} else {
IngressStatus::Known {
receiver: canister_test_id(0).get(),
user_id: user_test_id(0),
time: mock_time(),
state: IngressState::Completed(ic_types::ingress::WasmResult::Reply(
vec![],
)),
}
}
})
});
(
SimulatedIngressHistory {
time_source,
histories,
},
ingress_hist_reader,
)
}
/// Return the simulated batch time, As time_source increases by BLOCK_TIME,
/// next batch is retrieved from the pre-computed ingress histories.
fn batch_time(&self) -> Time {
let mut histories = self.histories.write().unwrap();
let mut t = histories[0].0;
while self.time_source.get_relative_time() > t + BLOCK_TIME {
histories.remove(0);
t = histories[0].0;
}
t
}
/// Build out the entire ingress history, with one set every 2 seconds.
/// For each set:
///
/// 1. We assign a non-decreasing timestamp `t` that is 2s greater than
/// previous one.
///
/// 2. It contains up to MAX_INGRESS_COUNT_PER_PAYLAD * MAX_INGRES_TTL / 2
/// messages.
///
/// 3. All messages are within expiry between `t - 2s - MAX_INGRESS_TTL` and
/// `t - 2s`.
fn set_history(&self, messages: BTreeMap<Time, MessageId>) {
let mut rng = rand::thread_rng();
let start_time = self.time_source.get_relative_time();
let end_time = *messages.keys().rev().next().unwrap();
let mut histories = vec![];
let mut time = start_time + Duration::from_secs(2);
let set_limit = MAX_INGRESS_COUNT_PER_PAYLOAD * (MAX_INGRESS_TTL.as_secs() as usize) / 2;
while time < end_time {
let min_time = if start_time + MAX_INGRESS_TTL < time {
time - MAX_INGRESS_TTL
} else {
start_time
};
let mut messages: Vec<MessageId> = messages
.range(min_time..time)
.map(|(_, v)| v.clone())
.collect();
messages.shuffle(&mut rng);
let set = messages.into_iter().take(set_limit).collect::<HashSet<_>>();
histories.push(Arc::new((time, set)));
time += Duration::from_secs(2);
}
*self.histories.write().unwrap() = histories;
}
}
/// Helper to run a single test with dependency setup.
fn run_test<T>(_test_name: &str, test: T)
where
T: FnOnce(
Arc<FastForwardTimeSource>,
ArtifactPoolConfig,
ReplicaLogger,
&SimulatedIngressHistory,
&mut IngressManager,
),
{
ic_test_utilities::with_test_replica_logger(|log| {
ic_test_utilities::artifact_pool_config::with_test_pool_config(|pool_config| {
let time_source = FastForwardTimeSource::new();
// Set initial time to non-zero
time_source
.set_time(mock_time() + Duration::from_secs(1))
.unwrap();
let (history, ingress_hist_reader) = SimulatedIngressHistory::new(time_source.clone());
let history = Arc::new(history);
let history_cl = history.clone();
let subnet_id = subnet_test_id(1);
let mut state_manager = MockStateManager::new();
state_manager
.expect_latest_state_height()
.return_const(Height::from(1));
state_manager.expect_get_latest_state().returning(move || {
let mut metadata = SystemMetadata::new(subnet_id, SubnetType::Application);
metadata.batch_time = history_cl.batch_time();
Labeled::new(
Height::from(1),
Arc::new(ReplicatedState::new_from_checkpoint(
BTreeMap::new(),
metadata,
CanisterQueues::default(),
Vec::new(),
BitcoinState::default(),
std::path::PathBuf::new(),
)),
)
});
let mut consensus_pool_cache = MockConsensusCache::new();
let time_source_cl = time_source.clone();
consensus_pool_cache
.expect_consensus_time()
.returning(move || Some(time_source_cl.get_relative_time()));
let subnet_id = subnet_test_id(0);
const VALIDATOR_NODE_ID: u64 = 42;
let ingress_signature_crypto = Arc::new(temp_crypto_component_with_fake_registry(
node_test_id(VALIDATOR_NODE_ID),
));
let mut state_manager = MockStateManager::new();
state_manager.expect_get_state_at().return_const(Ok(
ic_interfaces_state_manager::Labeled::new(
Height::new(0),
Arc::new(ReplicatedStateBuilder::default().build()),
),
));
let metrics_registry = MetricsRegistry::new();
let ingress_pool = Arc::new(RwLock::new(IngressPoolImpl::new(
pool_config.clone(),
metrics_registry.clone(),
no_op_logger(),
)));
let cycles_account_manager = Arc::new(CyclesAccountManagerBuilder::new().build());
let runtime = tokio::runtime::Runtime::new().unwrap();
let mut ingress_manager = IngressManager::new(
Arc::new(consensus_pool_cache),
Box::new(ingress_hist_reader),
ingress_pool,
setup_registry(subnet_id, runtime.handle().clone()),
ingress_signature_crypto,
metrics_registry,
subnet_id,
log.clone(),
Arc::new(state_manager),
cycles_account_manager,
MaliciousFlags::default(),
);
test(
time_source,
pool_config,
log,
&history,
&mut ingress_manager,
);
})
})
}
/// Prepare a set of unvalidated ingress messages, with expiry
/// randomly distributed over the given expiry time period.
fn prepare(time_source: &dyn TimeSource, duration: Duration, num: usize) -> Vec<SignedIngress> {
let now = time_source.get_relative_time();
let max_expiry = now + duration;
let mut rng = rand::thread_rng();
(0..num)
.map(|i| {
let expiry = std::time::Duration::from_millis(
rng.gen::<u64>() % ((max_expiry - now).as_millis() as u64),
);
SignedIngressBuilder::new()
.method_payload(vec![0; PAYLOAD_SIZE])
.nonce(i as u64)
.expiry_time(now + expiry)
.build()
})
.collect::<Vec<_>>()
}
/// Setup ingress pool with the given set of messages.
fn setup(
time_source: &FastForwardTimeSource,
pool_config: ArtifactPoolConfig,
log: ReplicaLogger,
messages: Vec<SignedIngress>,
) -> (IngressPoolImpl, BTreeMap<Time, MessageId>) {
let mut pool = IngressPoolImpl::new(pool_config, MetricsRegistry::new(), log);
let mut message_ids = BTreeMap::new();
let timestamp = time_source.get_relative_time();
for (i, ingress) in messages.into_iter().enumerate() {
message_ids.insert(ingress.expiry_time(), ingress.id());
pool.insert(UnvalidatedArtifact {
message: ingress,
peer_id: node_test_id((i % 10) as u64),
timestamp,
});
}
(pool, message_ids)
}
/// Call ingress manager on_state_change, and apply changeset to the ingress
/// pool. Return number of change actions.
fn on_state_change(pool: &mut IngressPoolImpl, manager: &IngressManager) -> usize {
let changeset = manager.on_state_change(pool);
let n = changeset.len();
pool.apply_changeset(changeset);
n
}
/// Speed test for ingress handling.
fn handle_ingress(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("handle_ingress");
group.sample_size(10);
group.measurement_time(Duration::from_secs(10));
for i in 1..=10 {
// num messages per second
let ingress_rate = i * 100;
// We don't have to run the benchmark for the full TTL interval. Ending in 30
// simulated seconds is good enough.
let time_span = Duration::from_secs(30);
// range of ingress expiry
let expiry_range = MAX_INGRESS_TTL + time_span;
let total_messages = ingress_rate * expiry_range.as_secs();
run_test(
"get_ingress_payload",
|time_source: Arc<FastForwardTimeSource>,
pool_config: ArtifactPoolConfig,
log: ReplicaLogger,
history: &SimulatedIngressHistory,
manager: &mut IngressManager| {
let name = format!("handle_ingress({})", ingress_rate);
let messages = prepare(time_source.as_ref(), expiry_range, total_messages as usize);
let (pool, message_ids) = setup(time_source.as_ref(), pool_config, log, messages);
group.bench_function(&name, |bench| {
bench.iter_custom(|iters| {
let mut elapsed = Duration::from_secs(0);
for _ in 0..iters {
let bench_start = Instant::now();
let mut ingress_pool = pool.clone();
time_source.reset();
// We skip the first MAX_INGRESS_TTL duration in order to save
// overall benchmark time. Also by this time, the ingress
// history has become fully populated.
let start = time_source.get_relative_time() + MAX_INGRESS_TTL;
time_source.set_time(start).unwrap();
history.set_history(message_ids.clone());
// Increment time every 200ms until it is over.
loop {
on_state_change(&mut ingress_pool, manager);
let now = time_source.get_relative_time();
if now >= start + time_span {
break;
}
time_source
.set_time(now + Duration::from_millis(200))
.unwrap();
}
elapsed += bench_start.elapsed();
}
elapsed
})
});
},
);
}
group.finish()
}
/// Sets up a registry client.
fn setup_registry(subnet_id: SubnetId, runtime: tokio::runtime::Handle) -> Arc<dyn RegistryClient> {
let registry_data_provider = Arc::new(ProtoRegistryDataProvider::new());
let subnet_record = test_subnet_record();
registry_data_provider
.add(
&make_subnet_record_key(subnet_id),
RegistryVersion::from(1),
Some(subnet_record),
)
.expect("Failed to add subnet record.");
let registry = Arc::new(RegistryClientImpl::new(
Arc::clone(®istry_data_provider) as Arc<_>,
None,
));
runtime.block_on(async { registry.as_ref().fetch_and_start_polling().unwrap() });
registry
}
criterion_group!(benches, handle_ingress);
criterion_main!(benches);
| 40.102632 | 100 | 0.583044 |
f45ebe3ddb5fc986c2aebcf00e20913eb6179def | 301 | pub fn is_even(num: i32) -> bool {
num % 2 == 0
}
fn main() {
println!("Hello, world!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn is_true_when_even() {
assert!(is_even(2));
}
#[test]
fn is_false_when_odd() {
assert!(!is_even(3));
}
}
| 13.086957 | 34 | 0.488372 |
080a5933ef7a6107e16fd536e49263767bf36315 | 1,812 | //use std::fs;
use std::fs::File;
use std::io::Read;
use encoding_rs_io::DecodeReaderBytes;
use xmltree::Element;
fn read_utf16(path: &str) -> String {
let mut file = DecodeReaderBytes::new(File::open(path).expect("Could not open"));
let mut buff = String::new();
file.read_to_string(&mut buff).unwrap();
buff
}
pub fn get_children_all(element: xmltree::Element) -> Vec<xmltree::Element> {
let mut child_v: Vec<xmltree::Element> = Vec::new();
let children = &element.children;
for i in 0..children.len() {
match children[i].as_element() {
Some(x) => child_v.push(x.clone()) ,
None => continue,
}
}
child_v
}
fn read_children_r(tree: &mut Vec<xmltree::Element>, element: xmltree::Element, predicate: &str) {
let children = get_children_all(element);
for i in 0..children.len() {
if children[i].name == predicate {
tree.push(children[i].clone());
}
read_children_r(tree, children[i].clone(), predicate);
}
}
pub fn get_children_r(element: xmltree::Element, predicate: &str) -> Vec<xmltree::Element> {
let mut children: Vec<xmltree::Element> = Vec::new();
read_children_r(&mut children, element, predicate);
children
}
pub fn read_xml_file(path: &str) -> Result<xmltree::Element, xmltree::ParseError> {
Element::parse(read_utf16(path).as_bytes())
}
pub fn attr(element: &xmltree::Element, pred: &str) -> String {
for i in element.attributes.iter() {
if i.0 == pred { return i.1.to_string(); }
}
String::new()
}
pub fn next(element: xmltree::Element) -> xmltree::Element {
let children = get_children_all(element.clone());
if children.len() > 1 {
return element;
}
else {
return next(children[0].clone());
}
}
| 27.876923 | 98 | 0.625828 |
e9679f6a48d843ab3afc63be2e17a0eccf34c2ec | 11,674 | extern crate resolv_conf;
use resolv_conf::{Network, ScopedIp};
use std::path::Path;
use std::io::Read;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
#[test]
fn test_comment() {
resolv_conf::Config::parse("#").unwrap();
resolv_conf::Config::parse(";").unwrap();
resolv_conf::Config::parse("#junk").unwrap();
resolv_conf::Config::parse("# junk").unwrap();
resolv_conf::Config::parse(";junk").unwrap();
resolv_conf::Config::parse("; junk").unwrap();
}
fn ip(s: &str) -> ScopedIp {
s.parse().unwrap()
}
fn parse_str(s: &str) -> resolv_conf::Config {
resolv_conf::Config::parse(s).unwrap()
}
#[test]
fn test_basic_options() {
assert_eq!(
parse_str("nameserver 127.0.0.1").nameservers,
vec![ip("127.0.0.1")]
);
assert_eq!(
parse_str("search localnet.*").get_search(),
Some(vec!["localnet.*".to_string()]).as_ref()
);
assert_eq!(
parse_str("domain example.com.").get_domain(),
Some(String::from("example.com.")).as_ref()
);
}
#[test]
fn test_extra_whitespace() {
assert_eq!(
parse_str("domain example.com.").get_domain(),
Some(String::from("example.com.")).as_ref()
);
assert_eq!(
parse_str("domain example.com. ").get_domain(),
Some(String::from("example.com.")).as_ref()
);
// hard tabs
assert_eq!(
parse_str(" domain example.com. ").get_domain(),
Some(String::from("example.com.")).as_ref()
);
// hard tabs + spaces
assert_eq!(
parse_str(" domain example.com. ").get_domain(),
Some(String::from("example.com.")).as_ref()
);
}
#[test]
fn test_invalid_lines() {
assert!(resolv_conf::Config::parse("nameserver 10.0.0.1%1").is_err());
assert!(resolv_conf::Config::parse("nameserver 10.0.0.1.0").is_err());
assert!(resolv_conf::Config::parse("Nameserver 10.0.0.1").is_err());
assert!(resolv_conf::Config::parse("nameserver 10.0.0.1 domain foo.com").is_err());
assert!(resolv_conf::Config::parse("invalid foo.com").is_err());
assert!(resolv_conf::Config::parse("options ndots:1 foo:1").is_err());
}
#[test]
fn test_empty_line() {
assert_eq!(parse_str(""), resolv_conf::Config::new());
}
#[test]
fn test_multiple_options_on_one_line() {
let config = parse_str("options ndots:8 attempts:8 rotate inet6 no-tld-query timeout:8");
assert_eq!(config.ndots, 8);
assert_eq!(config.timeout, 8);
assert_eq!(config.attempts, 8);
assert_eq!(config.rotate, true);
assert_eq!(config.inet6, true);
assert_eq!(config.no_tld_query, true);
}
#[test]
fn test_ip() {
let parsed = ip("FE80::C001:1DFF:FEE0:0%eth0");
let address = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xc001, 0x1dff, 0xfee0, 0);
let scope = "eth0".to_string();
assert_eq!(parsed, ScopedIp::V6(address, Some(scope)));
let parsed = ip("FE80::C001:1DFF:FEE0:0%1");
let address = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xc001, 0x1dff, 0xfee0, 0);
let scope = "1".to_string();
assert_eq!(parsed, ScopedIp::V6(address, Some(scope)));
let parsed = ip("FE80::C001:1DFF:FEE0:0");
let address = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xc001, 0x1dff, 0xfee0, 0);
assert_eq!(parsed, ScopedIp::V6(address, None));
assert!("10.0.0.1%1".parse::<ScopedIp>().is_err());
assert!("10.0.0.1%eth0".parse::<ScopedIp>().is_err());
assert!("FE80::C001:1DFF:FEE0:0%".parse::<ScopedIp>().is_err());
assert!("FE80::C001:1DFF:FEE0:0% ".parse::<ScopedIp>().is_err());
let parsed = ip("192.168.10.1");
let address = Ipv4Addr::new(192, 168, 10, 1);
assert_eq!(parsed, ScopedIp::V4(address));
}
#[test]
fn test_nameserver() {
assert_eq!(
parse_str("nameserver 127.0.0.1").nameservers[0],
ip("127.0.0.1")
);
assert_eq!(
parse_str("nameserver 127.0.0.1#comment").nameservers[0],
ip("127.0.0.1")
);
assert_eq!(
parse_str("nameserver 127.0.0.1;comment").nameservers[0],
ip("127.0.0.1")
);
assert_eq!(
parse_str("nameserver 127.0.0.1 # another comment").nameservers[0],
ip("127.0.0.1")
);
assert_eq!(
parse_str("nameserver 127.0.0.1 ; ").nameservers[0],
ip("127.0.0.1")
);
assert_eq!(parse_str("nameserver ::1").nameservers[0], ip("::1"));
assert_eq!(
parse_str("nameserver 2001:db8:85a3:8d3:1319:8a2e:370:7348").nameservers[0],
ip("2001:db8:85a3:8d3:1319:8a2e:370:7348")
);
assert_eq!(
parse_str("nameserver ::ffff:192.0.2.128").nameservers[0],
ip("::ffff:192.0.2.128")
);
}
fn parse_file<P: AsRef<Path>>(path: P) -> resolv_conf::Config {
let mut data = String::new();
let mut file = File::open(path).unwrap();
file.read_to_string(&mut data).unwrap();
resolv_conf::Config::parse(&data).unwrap()
}
#[test]
fn test_parse_simple_conf() {
let mut config = resolv_conf::Config::new();
config
.nameservers
.push(ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)));
config
.nameservers
.push(ScopedIp::V4(Ipv4Addr::new(8, 8, 4, 4)));
assert_eq!(config, parse_file("tests/resolv.conf-simple"));
}
#[test]
fn test_parse_linux_conf() {
let mut config = resolv_conf::Config::new();
config.set_domain(String::from("example.com"));
config.set_search(vec!["example.com".into(), "sub.example.com".into()]);
config.nameservers = vec![
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888),
None,
),
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8844),
None,
),
ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)),
ScopedIp::V4(Ipv4Addr::new(8, 8, 4, 4)),
];
config.ndots = 8;
config.timeout = 8;
config.attempts = 8;
config.rotate = true;
config.inet6 = true;
config.no_tld_query = true;
config.sortlist = vec![
Network::V4(
Ipv4Addr::new(130, 155, 160, 0),
Ipv4Addr::new(255, 255, 240, 0),
),
// This fails currently
Network::V4(Ipv4Addr::new(130, 155, 0, 0), Ipv4Addr::new(255, 255, 0, 0)),
];
assert_eq!(config, parse_file("tests/resolv.conf-linux"));
}
#[test]
fn test_parse_macos_conf() {
let mut config = resolv_conf::Config::new();
config.set_domain(String::from("example.com."));
config.set_search(vec!["example.com.".into(), "sub.example.com.".into()]);
config.nameservers = vec![
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888),
None,
),
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8844),
None,
),
ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)),
ScopedIp::V4(Ipv4Addr::new(8, 8, 4, 4)),
];
config.ndots = 8;
config.timeout = 8;
config.attempts = 8;
assert_eq!(config, parse_file("tests/resolv.conf-macos"));
}
#[test]
fn test_parse_openbsd_conf() {
let mut config = resolv_conf::Config::new();
config.set_domain(String::from("example.com"));
config.set_search(vec!["example.com".into()]);
config.nameservers = vec![
ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)),
];
config.lookup_file_bind = true;
assert_eq!(config, parse_file("tests/resolv.conf-openbsd"));
}
#[test]
fn test_glibc_normalize() {
let mut config = resolv_conf::Config::new();
config.nameservers = vec![
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888),
None,
),
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8844),
None,
),
ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)),
ScopedIp::V4(Ipv4Addr::new(8, 8, 4, 4)),
];
config.set_search(vec![
"a.example.com".into(),
"b.example.com".into(),
"c.example.com".into(),
"d.example.com".into(),
"e.example.com".into(),
"f.example.com".into(),
"g.example.com".into(),
"h.example.com".into(),
]);
config.glibc_normalize();
assert_eq!(
vec![
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888),
None,
),
ScopedIp::V6(
Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8844),
None,
),
ScopedIp::V4(Ipv4Addr::new(8, 8, 8, 8)),
],
config.nameservers
);
assert_eq!(
Some(&vec![
"a.example.com".into(),
"b.example.com".into(),
"c.example.com".into(),
"d.example.com".into(),
"e.example.com".into(),
"f.example.com".into()
]),
config.get_search()
);
}
#[test]
fn test_get_nameservers_or_local() {
let config = resolv_conf::Config::new();
assert_eq!(
vec![
ScopedIp::from(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))),
ScopedIp::from(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))),
],
config.get_nameservers_or_local()
);
}
#[test]
#[cfg(feature = "system")]
#[ignore]
fn test_get_system_domain() {
let config = resolv_conf::Config::new();
assert_eq!(Some("lan".into()), config.get_system_domain());
}
#[test]
fn test_default_display() {
let original_config = resolv_conf::Config::new();
let output = original_config.to_string();
let restored_config = resolv_conf::Config::parse(&output).unwrap();
assert_eq!(original_config, restored_config);
}
#[test]
fn test_non_default_display() {
let mut original_config = resolv_conf::Config::new();
original_config.nameservers = vec![
ip("192.168.0.94"),
ip("fe80::0123:4567:89ab:cdef"),
ip("fe80::0123:4567:89ab:cdef%zone"),
];
original_config.sortlist = vec![
Network::V4(
"192.168.1.94".parse().unwrap(),
"255.255.252.0".parse().unwrap(),
),
Network::V6("fe80::0123".parse().unwrap(), "fe80::cdef".parse().unwrap()),
];
original_config.set_domain("my.domain".to_owned());
original_config.set_search(
vec!["my.domain", "alt.domain"]
.into_iter()
.map(str::to_owned)
.collect(),
);
original_config.debug = true;
original_config.ndots = 4;
original_config.timeout = 20;
original_config.attempts = 5;
original_config.rotate = true;
original_config.no_check_names = true;
original_config.inet6 = true;
original_config.ip6_bytestring = true;
original_config.ip6_dotint = true;
original_config.edns0 = true;
original_config.single_request = true;
original_config.single_request_reopen = true;
original_config.no_tld_query = true;
original_config.use_vc = true;
let output = original_config.to_string();
println!("Output:\n\n{}", output);
let restored_config = resolv_conf::Config::parse(&output).unwrap();
assert_eq!(original_config, restored_config);
}
#[test]
fn test_display_preservers_last_search() {
let mut original_config = resolv_conf::Config::new();
original_config.set_search(
vec!["my.domain", "alt.domain"]
.into_iter()
.map(str::to_owned)
.collect(),
);
original_config.set_domain("my.domain".to_owned());
let output = original_config.to_string();
println!("Output:\n\n{}", output);
let restored_config = resolv_conf::Config::parse(&output).unwrap();
assert_eq!(original_config, restored_config);
}
| 29.933333 | 93 | 0.585832 |
7a8403255edb65fd058a89725d28c413f4acae1b | 1,058 | // aux-build:intra-link-proc-macro-macro.rs
// build-aux-docs
#![deny(broken_intra_doc_links)]
extern crate intra_link_proc_macro_macro;
pub use intra_link_proc_macro_macro::{DeriveA, attr_a};
use intra_link_proc_macro_macro::{DeriveB, attr_b};
// @has intra_link_proc_macro/struct.Foo.html
// @has - '//a/@href' '../intra_link_proc_macro/derive.DeriveA.html'
// @has - '//a/@href' '../intra_link_proc_macro/attr.attr_a.html'
// @has - '//a/@href' '../intra_link_proc_macro/trait.DeriveTrait.html'
// @has - '//a/@href' '../intra_link_proc_macro_macro/derive.DeriveB.html'
// @has - '//a/@href' '../intra_link_proc_macro_macro/attr.attr_b.html'
/// Link to [DeriveA], [attr_a], [DeriveB], [attr_b], [DeriveTrait]
pub struct Foo;
// @has intra_link_proc_macro/struct.Bar.html
// @has - '//a/@href' '../intra_link_proc_macro/derive.DeriveA.html'
// @has - '//a/@href' '../intra_link_proc_macro/attr.attr_a.html'
/// Link to [deriveA](derive@DeriveA) [attr](macro@attr_a)
pub struct Bar;
// this should not cause ambiguity errors
pub trait DeriveTrait {}
| 37.785714 | 74 | 0.718336 |
9c8a4a7a44f244b4d89ce7265dd0aa16373f0175 | 147,265 | use std::cell::RefCell;
use std::collections::BTreeMap;
use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use std::rc::Rc;
use std::string::ToString;
use std::sync::{Arc, RwLock, RwLockReadGuard};
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
use crate::bindings::{
Binding, BindingManager, BindingStack, Bindings, Bsp, FollowerId, VariableState,
};
use crate::counter::Counter;
use crate::data_filtering::partition_equivs;
use crate::debugger::{get_binding_for_var, DebugEvent, Debugger};
use crate::error::{invalid_state, unsupported, PolarError, PolarResult, RuntimeError};
use crate::events::*;
use crate::folder::Folder;
use crate::inverter::Inverter;
use crate::kb::*;
use crate::messages::*;
use crate::numerics::*;
use crate::partial::{simplify_bindings_opt, simplify_partial, sub_this, IsaConstraintCheck};
use crate::rewrites::Renamer;
use crate::rules::*;
use crate::runnable::Runnable;
use crate::sources::Context;
use crate::terms::*;
use crate::traces::*;
use crate::visitor::{walk_term, Visitor};
pub const MAX_STACK_SIZE: usize = 10_000;
pub const DEFAULT_TIMEOUT_MS: u64 = 30_000;
#[derive(Debug, Copy, Clone, PartialOrd, PartialEq)]
pub enum LogLevel {
Trace,
Debug,
Info,
}
impl LogLevel {
fn should_print_on_level(&self, level: LogLevel) -> bool {
*self <= level
}
}
#[derive(Debug, Clone)]
#[must_use = "ignored goals are never accomplished"]
#[allow(clippy::large_enum_variant)]
pub enum Goal {
Backtrack,
Cut {
choice_index: usize, // cuts all choices in range [choice_index..]
},
Debug {
message: String,
},
Error {
error: PolarError,
},
Halt,
Isa {
left: Term,
right: Term,
},
IsMoreSpecific {
left: Arc<Rule>,
right: Arc<Rule>,
args: TermList,
},
IsSubspecializer {
answer: Symbol,
left: Term,
right: Term,
arg: Term,
},
Lookup {
dict: Dictionary,
field: Term,
value: Term,
},
LookupExternal {
call_id: u64,
instance: Term,
field: Term,
},
IsaExternal {
instance: Term,
literal: InstanceLiteral,
},
MakeExternal {
constructor: Term,
instance_id: u64,
},
NextExternal {
call_id: u64,
iterable: Term,
},
CheckError,
Noop,
Query {
term: Term,
},
PopQuery {
term: Term,
},
FilterRules {
args: TermList,
applicable_rules: Rules,
unfiltered_rules: Rules,
},
SortRules {
args: TermList,
rules: Rules,
outer: usize,
inner: usize,
},
TraceRule {
trace: Rc<Trace>,
},
TraceStackPush,
TraceStackPop,
Unify {
left: Term,
right: Term,
},
/// Run the `runnable`.
Run {
runnable: Box<dyn Runnable>,
},
/// Add a new constraint
AddConstraint {
term: Term,
},
/// TODO hack.
/// Add a new constraint
AddConstraintsBatch {
add_constraints: Rc<RefCell<Bindings>>,
},
}
#[derive(Clone, Debug)]
pub struct Choice {
pub alternatives: Vec<GoalStack>,
bsp: Bsp, // binding stack pointer
pub goals: GoalStack, // goal stack snapshot
queries: Queries, // query stack snapshot
trace: Vec<Rc<Trace>>, // trace snapshot
trace_stack: TraceStack,
}
pub type Choices = Vec<Choice>;
/// Shortcut type alias for a list of goals
pub type Goals = Vec<Goal>;
pub type TraceStack = Vec<Rc<Vec<Rc<Trace>>>>;
#[derive(Clone, Debug, Default)]
pub struct GoalStack(Vec<Rc<Goal>>);
impl GoalStack {
fn new_reversed(goals: Goals) -> Self {
Self(goals.into_iter().rev().map(Rc::new).collect())
}
}
impl std::ops::Deref for GoalStack {
type Target = Vec<Rc<Goal>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for GoalStack {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
pub type Queries = TermList;
pub fn compare(
op: Operator,
left: &Term,
right: &Term,
context: Option<&Term>,
) -> PolarResult<bool> {
use {Operator::*, Value::*};
// Coerce booleans to integers.
// FIXME(gw) why??
fn to_int(x: bool) -> Numeric {
Numeric::Integer(if x { 1 } else { 0 })
}
fn compare<T: PartialOrd>(op: Operator, left: T, right: T) -> PolarResult<bool> {
match op {
Lt => Ok(left < right),
Leq => Ok(left <= right),
Gt => Ok(left > right),
Geq => Ok(left >= right),
Eq => Ok(left == right),
Neq => Ok(left != right),
_ => invalid_state(format!("`{}` is not a comparison operator", op)),
}
}
match (left.value(), right.value()) {
(Boolean(l), Boolean(r)) => compare(op, &to_int(*l), &to_int(*r)),
(Boolean(l), Number(r)) => compare(op, &to_int(*l), r),
(Number(l), Boolean(r)) => compare(op, l, &to_int(*r)),
(Number(l), Number(r)) => compare(op, l, r),
(String(l), String(r)) => compare(op, l, r),
_ => {
let context = context.expect("should only be None in Grounder, where we unwrap anyway");
unsupported(context.to_string(), context)
}
}
}
#[derive(Clone)]
pub struct PolarVirtualMachine {
/// Stacks.
pub goals: GoalStack,
binding_manager: BindingManager,
choices: Choices,
pub queries: Queries,
pub tracing: bool,
pub trace_stack: TraceStack, // Stack of traces higher up the tree.
pub trace: Vec<Rc<Trace>>, // Traces for the current level of the trace tree.
// Errors from outside the vm.
pub external_error: Option<String>,
#[cfg(not(target_arch = "wasm32"))]
query_start_time: Option<std::time::Instant>,
#[cfg(target_arch = "wasm32")]
query_start_time: Option<f64>,
query_timeout_ms: u64,
/// Maximum size of goal stack
stack_limit: usize,
/// Binding stack constant below here.
csp: Bsp,
/// Interactive debugger.
pub debugger: Debugger,
/// Rules and types.
pub kb: Arc<RwLock<KnowledgeBase>>,
/// Call ID -> result variable name table.
call_id_symbols: HashMap<u64, Symbol>,
/// Logging flag.
log_level: Option<LogLevel>,
polar_log_stderr: bool,
polar_trace_mute: bool,
// Other flags.
pub query_contains_partial: bool,
pub inverting: bool,
/// Output messages.
pub messages: MessageQueue,
}
impl Default for PolarVirtualMachine {
fn default() -> Self {
PolarVirtualMachine::new(
Arc::new(RwLock::new(KnowledgeBase::default())),
false,
vec![],
// Messages will not be exposed, only use default() for testing.
MessageQueue::new(),
)
}
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(js_namespace = console, js_name = error)]
fn console_error(a: &str);
}
// Methods which aren't goals/instructions.
impl PolarVirtualMachine {
/// Make a new virtual machine with an initial list of goals.
/// Reverse the goal list for the sanity of callers.
pub fn new(
kb: Arc<RwLock<KnowledgeBase>>,
tracing: bool,
goals: Goals,
messages: MessageQueue,
) -> Self {
let query_timeout_ms = std::env::var("POLAR_TIMEOUT_MS")
.ok()
.and_then(|timeout_str| timeout_str.parse::<u64>().ok())
.unwrap_or(DEFAULT_TIMEOUT_MS);
let constants = kb
.read()
.expect("cannot acquire KB read lock")
.get_registered_constants()
.clone();
let mut vm = Self {
goals: GoalStack::new_reversed(goals),
binding_manager: BindingManager::new(),
query_start_time: None,
query_timeout_ms,
stack_limit: MAX_STACK_SIZE,
csp: Bsp::default(),
choices: vec![],
queries: vec![],
tracing,
trace_stack: vec![],
trace: vec![],
external_error: None,
debugger: Debugger::default(),
kb,
call_id_symbols: HashMap::new(),
// `log` controls internal VM logging
log_level: None,
// `polar_log_stderr` prints things immediately to stderr
polar_log_stderr: false,
polar_trace_mute: false,
query_contains_partial: false,
inverting: false,
messages,
};
vm.bind_constants(constants);
vm.query_contains_partial();
let polar_log = std::env::var("POLAR_LOG");
vm.set_logging_options(None, polar_log.ok());
vm
}
pub fn set_logging_options(&mut self, rust_log: Option<String>, polar_log: Option<String>) {
let polar_log = polar_log.unwrap_or_else(|| "".to_string());
let polar_log_vars: HashSet<String> = polar_log
.split(',')
.filter(|v| !v.is_empty())
.map(|s| s.to_lowercase())
.collect();
self.polar_log_stderr = polar_log_vars.contains(&"now".to_string());
// TODO: @patrickod remove `RUST_LOG` from node lib & drop this option.
self.log_level = if rust_log.is_some() {
Some(LogLevel::Trace)
} else {
None
};
// The values `off` and `0` mute all logging and take precedence over any other coexisting value.
// If POLAR_LOG is specified we attempt to match the level requested, other default to INFO
if !polar_log_vars.is_empty()
&& polar_log_vars.is_disjoint(&HashSet::from(["off".to_string(), "0".to_string()]))
{
self.log_level = if polar_log_vars.contains(&LogLevel::Trace.to_string()) {
Some(LogLevel::Trace)
} else if polar_log_vars.contains(&LogLevel::Debug.to_string()) {
Some(LogLevel::Debug)
} else {
Some(LogLevel::Info)
}
}
}
fn query_contains_partial(&mut self) {
struct VarVisitor<'vm> {
has_partial: bool,
vm: &'vm PolarVirtualMachine,
}
impl<'vm> Visitor for VarVisitor<'vm> {
fn visit_variable(&mut self, v: &Symbol) {
if matches!(self.vm.variable_state(v), VariableState::Partial) {
self.has_partial = true;
}
}
}
let mut visitor = VarVisitor {
has_partial: false,
vm: self,
};
self.query_contains_partial = self.goals.iter().any(|goal| {
if let Goal::Query { term } = goal.as_ref() {
walk_term(&mut visitor, term);
visitor.has_partial
} else {
false
}
});
}
#[cfg(test)]
pub fn new_test(kb: Arc<RwLock<KnowledgeBase>>, tracing: bool, goals: Goals) -> Self {
PolarVirtualMachine::new(kb, tracing, goals, MessageQueue::new())
}
/// Clone self, replacing the goal stack and retaining only the current bindings.
pub fn clone_with_goals(&self, goals: Goals) -> Self {
let mut vm = Self::new(self.kb.clone(), self.tracing, goals, self.messages.clone());
vm.binding_manager.clone_from(&self.binding_manager);
vm.query_contains_partial = self.query_contains_partial;
vm.debugger = self.debugger.clone();
vm
}
#[cfg(test)]
fn set_stack_limit(&mut self, limit: usize) {
self.stack_limit = limit;
}
fn kb(&self) -> RwLockReadGuard<KnowledgeBase> {
self.kb.read().unwrap()
}
fn new_id(&self) -> u64 {
self.kb().new_id()
}
pub fn id_counter(&self) -> Counter {
self.kb().id_counter()
}
fn new_call_id(&mut self, symbol: &Symbol) -> u64 {
let call_id = self.new_id();
self.call_id_symbols.insert(call_id, symbol.clone());
call_id
}
fn new_call_var(&mut self, var_prefix: &str, initial_value: Value) -> (u64, Term) {
let sym = self.kb().gensym(var_prefix);
self.bind(&sym, Term::from(initial_value)).unwrap();
let call_id = self.new_call_id(&sym);
(call_id, Term::from(sym))
}
fn get_call_sym(&self, call_id: u64) -> &Symbol {
self.call_id_symbols
.get(&call_id)
.expect("unregistered external call ID")
}
/// Try to achieve one goal. Return `Some(QueryEvent)` if an external
/// result is needed to achieve it, or `None` if it can run internally.
fn next(&mut self, goal: Rc<Goal>) -> PolarResult<QueryEvent> {
self.log(LogLevel::Trace, || goal.to_string(), &[]);
self.check_timeout()?;
match goal.as_ref() {
Goal::Backtrack => self.backtrack()?,
Goal::Cut { choice_index } => self.cut(*choice_index),
Goal::Debug { message } => return Ok(self.debug(message)),
Goal::Halt => return Ok(self.halt()),
Goal::Error { error } => return Err(error.clone()),
Goal::Isa { left, right } => self.isa(left, right)?,
Goal::IsMoreSpecific { left, right, args } => {
self.is_more_specific(left, right, args)?
}
Goal::IsSubspecializer {
answer,
left,
right,
arg,
} => return self.is_subspecializer(answer, left, right, arg),
Goal::Lookup { dict, field, value } => self.lookup(dict, field, value)?,
Goal::LookupExternal {
call_id,
instance,
field,
} => return self.lookup_external(*call_id, instance, field),
Goal::IsaExternal { instance, literal } => return self.isa_external(instance, literal),
Goal::MakeExternal {
constructor,
instance_id,
} => return Ok(self.make_external(constructor, *instance_id)),
Goal::NextExternal { call_id, iterable } => {
return self.next_external(*call_id, iterable)
}
Goal::CheckError => return self.check_error(),
Goal::Noop => {}
Goal::Query { term } => {
let result = self.query(term);
self.maybe_break(DebugEvent::Query)?;
return result;
}
Goal::PopQuery { .. } => self.pop_query(),
Goal::FilterRules {
applicable_rules,
unfiltered_rules,
args,
} => self.filter_rules(applicable_rules, unfiltered_rules, args)?,
Goal::SortRules {
rules,
outer,
inner,
args,
} => self.sort_rules(rules, args, *outer, *inner)?,
Goal::TraceStackPush => {
self.trace_stack.push(Rc::new(self.trace.clone()));
self.trace = vec![];
}
Goal::TraceStackPop => {
let mut children = self.trace.clone();
self.trace = self.trace_stack.pop().unwrap().as_ref().clone();
let mut trace = self.trace.pop().unwrap();
let trace = Rc::make_mut(&mut trace);
trace.children.append(&mut children);
self.trace.push(Rc::new(trace.clone()));
self.maybe_break(DebugEvent::Pop)?;
}
Goal::TraceRule { trace } => {
if let Node::Rule(rule) = &trace.node {
self.log(LogLevel::Info, || format!("RULE: {}", rule), &[]);
}
self.trace.push(trace.clone());
self.maybe_break(DebugEvent::Rule)?;
}
Goal::Unify { left, right } => self.unify(left, right)?,
Goal::AddConstraint { term } => self.add_constraint(term)?,
Goal::AddConstraintsBatch { add_constraints } => {
add_constraints
.borrow_mut()
.drain()
.try_for_each(|(_, constraint)| self.add_constraint(&constraint))?
}
Goal::Run { runnable } => return self.run_runnable(runnable.clone_runnable()),
}
Ok(QueryEvent::None)
}
/// Push a goal onto the goal stack.
pub fn push_goal(&mut self, goal: Goal) -> PolarResult<()> {
use {Goal::*, VariableState::Unbound};
if self.goals.len() >= self.stack_limit {
let msg = format!("Goal stack overflow! MAX_GOALS = {}", self.stack_limit);
Err(RuntimeError::StackOverflow { msg }.into())
} else if matches!(goal, LookupExternal { call_id, ..} | NextExternal { call_id, .. } if self.variable_state(self.get_call_sym(call_id)) != Unbound)
{
invalid_state("The call_id result variables for LookupExternal and NextExternal goals must be unbound.")
} else {
self.goals.push(Rc::new(goal));
Ok(())
}
}
/// Push a non-trivial choice onto the choice stack.
///
/// Params:
///
/// - `alternatives`: an ordered list of alternatives to try in the choice.
/// The first element is the first alternative to try.
///
/// Do not modify the goals stack. This function defers execution of the
/// choice until a backtrack occurs. To immediately execute the choice on
/// top of the current stack, use `choose`.
fn push_choice<I>(&mut self, alternatives: I) -> PolarResult<()>
where
I: IntoIterator<Item = Goals>,
I::IntoIter: std::iter::DoubleEndedIterator,
{
// Make sure that alternatives are executed in order of first to last.
let alternatives = alternatives
.into_iter()
.rev()
.map(GoalStack::new_reversed)
.collect();
if self.choices.len() >= self.stack_limit {
let msg = "Too many choices.".to_owned();
Err(RuntimeError::StackOverflow { msg }.into())
} else {
self.choices.push(Choice {
alternatives,
bsp: self.bsp(),
goals: self.goals.clone(),
queries: self.queries.clone(),
trace: self.trace.clone(),
trace_stack: self.trace_stack.clone(),
});
Ok(())
}
}
/// Push a choice onto the choice stack, and execute immediately by
/// pushing the first alternative onto the goals stack
///
/// Params:
///
/// - `alternatives`: an ordered list of alternatives to try in the choice.
/// The first element is the first alternative to try.
fn choose<I>(&mut self, alternatives: I) -> PolarResult<()>
where
I: IntoIterator<Item = Goals>,
I::IntoIter: std::iter::DoubleEndedIterator,
{
let mut alternatives_iter = alternatives.into_iter();
if let Some(alternative) = alternatives_iter.next() {
self.push_choice(alternatives_iter)?;
self.append_goals(alternative)
} else {
self.backtrack()
}
}
/// If each goal of `conditional` succeeds, execute `consequent`;
/// otherwise, execute `alternative`. The branches are entered only
/// by backtracking so that bindings established during the execution
/// of `conditional` are always unwound.
fn choose_conditional(
&mut self,
mut conditional: Goals,
consequent: Goals,
mut alternative: Goals,
) -> PolarResult<()> {
// If the conditional fails, cut the consequent.
let cut_consequent = Goal::Cut {
choice_index: self.choices.len(),
};
alternative.insert(0, cut_consequent);
// If the conditional succeeds, cut the alternative and backtrack to this choice point.
self.push_choice(vec![consequent])?;
let cut_alternative = Goal::Cut {
choice_index: self.choices.len(),
};
conditional.push(cut_alternative);
conditional.push(Goal::Backtrack);
self.choose(vec![conditional, alternative])
}
/// Push multiple goals onto the stack in reverse order.
fn append_goals<I>(&mut self, goals: I) -> PolarResult<()>
where
I: IntoIterator<Item = Goal>,
I::IntoIter: std::iter::DoubleEndedIterator,
{
goals.into_iter().rev().try_for_each(|g| self.push_goal(g))
}
/// Rebind an external answer variable.
///
/// DO NOT USE THIS TO REBIND ANOTHER VARIABLE (see unsafe_rebind doc string).
fn rebind_external_answer(&mut self, var: &Symbol, val: Term) {
self.binding_manager.unsafe_rebind(var, val);
}
/// Push a binding onto the binding stack.
pub fn bind(&mut self, var: &Symbol, val: Term) -> PolarResult<()> {
self.log(
LogLevel::Trace,
|| format!("⇒ bind: {} ← {}", var, val),
&[],
);
if let Some(goal) = self.binding_manager.bind(var, val)? {
self.push_goal(goal)
} else {
Ok(())
}
}
pub fn add_binding_follower(&mut self) -> FollowerId {
self.binding_manager.add_follower(BindingManager::new())
}
pub fn remove_binding_follower(&mut self, follower_id: &FollowerId) -> Option<BindingManager> {
self.binding_manager.remove_follower(follower_id)
}
/// Add a single constraint operation to the variables referenced in it.
/// Precondition: Operation is either binary or ternary (binary + result var),
/// and at least one of the first two arguments is an unbound variable.
fn add_constraint(&mut self, term: &Term) -> PolarResult<()> {
self.log(
LogLevel::Trace,
|| format!("⇒ add_constraint: {}", term),
&[],
);
self.binding_manager.add_constraint(term)
}
/// Augment the bindings stack with constants from a hash map.
/// There must be no temporaries bound yet.
fn bind_constants(&mut self, bindings: Bindings) {
assert_eq!(self.bsp(), self.csp);
for (var, value) in bindings.iter() {
self.bind(var, value.clone()).unwrap();
}
self.csp = self.bsp();
}
/// Retrieve the current non-constant bindings as a hash map.
pub fn bindings(&self, include_temps: bool) -> Bindings {
self.binding_manager
.bindings_after(include_temps, &self.csp)
}
/// Retrive internal binding stack for debugger.
pub fn bindings_debug(&self) -> &BindingStack {
self.binding_manager.bindings_debug()
}
/// Returns bindings for all vars used by terms in terms.
pub fn relevant_bindings(&self, terms: &[&Term]) -> Bindings {
let mut variables = HashSet::new();
for t in terms {
t.variables(&mut variables);
}
self.binding_manager.variable_bindings(&variables)
}
/// Return the current binding stack pointer.
fn bsp(&self) -> Bsp {
self.binding_manager.bsp()
}
/// Investigate the state of a variable at some point and return a variable state variant.
pub fn variable_state_at_point(&self, variable: &Symbol, bsp: &Bsp) -> VariableState {
self.binding_manager.variable_state_at_point(variable, bsp)
}
/// Investigate the current state of a variable and return a variable state variant.
fn variable_state(&self, variable: &Symbol) -> VariableState {
self.binding_manager.variable_state(variable)
}
/// Recursively dereference variables in a term, including subterms, except operations.
fn deref(&self, term: &Term) -> Term {
self.binding_manager.deep_deref(term)
}
/// Generate a fresh set of variables for a rule.
fn rename_rule_vars(&self, rule: &Rule) -> Rule {
let kb = &*self.kb.read().unwrap();
let mut renamer = Renamer::new(kb);
renamer.fold_rule(rule.clone())
}
/// Push or print a message to the output stream.
#[cfg(not(target_arch = "wasm32"))]
fn print<S: Into<String>>(&self, message: S) {
let message = message.into();
if self.polar_log_stderr {
eprintln!("{}", message);
} else {
self.messages.push(MessageKind::Print, message);
}
}
/// Push or print a message to the WASM output stream.
#[cfg(target_arch = "wasm32")]
fn print<S: Into<String>>(&self, message: S) {
let message = message.into();
if self.polar_log_stderr {
console_error(&message);
} else {
self.messages.push(MessageKind::Print, message);
}
}
fn log<F, R>(&self, level: LogLevel, message_fn: F, terms: &[&Term])
where
F: FnOnce() -> R,
R: AsRef<str>,
{
if let Some(configured_log_level) = self.log_level {
// preserve the old `polar_log_mute` behavior which omits parameter
// specialization checking Unify, IsA and other events from the log
if level == LogLevel::Trace && self.polar_trace_mute {
return;
}
if configured_log_level.should_print_on_level(level) {
let mut indent = String::new();
for _ in 0..=self.queries.len() {
indent.push_str(" ");
}
let message = message_fn();
let lines = message.as_ref().split('\n').collect::<Vec<&str>>();
if let Some(line) = lines.first() {
let prefix = format!("[oso][{}] {}", level, &indent);
let mut msg = format!("{}{}", prefix, line);
// print BINDINGS: { .. } only for TRACE logs
if !terms.is_empty() && configured_log_level == LogLevel::Trace {
let relevant_bindings = self.relevant_bindings(terms);
msg.push_str(&format!(
", BINDINGS: {{{}}}",
relevant_bindings
.iter()
.map(|(var, val)| format!("{} => {}", var.0, val))
.collect::<Vec<String>>()
.join(", ")
));
}
self.print(msg);
for line in &lines[1..] {
self.print(format!("{}{}", prefix, line));
}
}
}
}
}
/// Get the query stack as a string for printing in error messages.
pub(crate) fn stack_trace(&self) -> String {
let mut trace_stack = self.trace_stack.clone();
let mut trace = self.trace.clone();
// Build linear stack from trace tree. Not just using query stack because it doesn't
// know about rules, query stack should really use this too.
let mut stack = vec![];
while let Some(t) = trace.last() {
stack.push(t.clone());
trace = trace_stack
.pop()
.map(|ts| ts.as_ref().clone())
.unwrap_or_else(Vec::new);
}
stack.reverse();
// Only index queries, not rules. Rule nodes are just used as context for where the query
// comes from.
let mut i = stack.iter().filter_map(|t| t.term()).count();
let mut st = "trace (most recent evaluation last):\n".to_owned();
let mut rule = None;
for t in stack {
match &t.node {
Node::Rule(r) => {
rule = Some(r.clone());
}
Node::Term(t) => {
if matches!(t.value(), Value::Expression(Operation { operator: Operator::And, args}) if args.len() == 1)
{
continue;
}
i -= 1;
let _ = writeln!(st, " {:03}: {}", i, self.term_source(t, false));
if let Some(context) = t.parsed_context() {
if let Some(rule) = &rule {
let _ = write!(st, " in rule {}", rule.name);
} else {
let _ = write!(st, " in query");
}
let _ = writeln!(st, "{}", context.source_position());
};
}
}
}
st
}
#[cfg(not(target_arch = "wasm32"))]
fn query_duration(&self) -> u64 {
let now = std::time::Instant::now();
let start = self.query_start_time.expect("Query start not recorded");
(now - start).as_millis() as u64
}
#[cfg(target_arch = "wasm32")]
fn query_duration(&self) -> u64 {
let now: f64 = js_sys::Date::now();
let start = self.query_start_time.expect("Query start not recorded");
(now - start) as u64
}
fn is_query_timeout_disabled(&self) -> bool {
self.query_timeout_ms == 0
}
fn check_timeout(&self) -> PolarResult<()> {
if self.is_query_timeout_disabled() {
// Useful for debugging
return Ok(());
}
let elapsed = self.query_duration();
let timeout = self.query_timeout_ms;
if elapsed > timeout {
return Err(RuntimeError::QueryTimeout { elapsed, timeout }.into());
}
Ok(())
}
}
/// Implementations of instructions.
impl PolarVirtualMachine {
/// Remove all bindings after the last choice point, and try the
/// next available alternative. If no choice is possible, halt.
fn backtrack(&mut self) -> PolarResult<()> {
self.log(LogLevel::Trace, || "BACKTRACK", &[]);
loop {
match self.choices.pop() {
None => return self.push_goal(Goal::Halt),
Some(Choice {
mut alternatives,
bsp,
goals,
queries,
trace,
trace_stack,
}) => {
self.binding_manager.backtrack(&bsp);
if let Some(mut alternative) = alternatives.pop() {
if alternatives.is_empty() {
self.goals = goals;
self.queries = queries;
self.trace = trace;
self.trace_stack = trace_stack;
} else {
self.goals.clone_from(&goals);
self.queries.clone_from(&queries);
self.trace.clone_from(&trace);
self.trace_stack.clone_from(&trace_stack);
self.choices.push(Choice {
alternatives,
bsp,
goals,
queries,
trace,
trace_stack,
})
}
self.goals.append(&mut alternative);
break;
}
}
}
}
Ok(())
}
/// Commit to the current choice.
fn cut(&mut self, index: usize) {
self.choices.truncate(index);
}
/// Clean up the query stack after completing a query.
fn pop_query(&mut self) {
self.queries.pop();
}
/// Interact with the debugger.
fn debug(&mut self, message: &str) -> QueryEvent {
// Query start time is reset when a debug event occurs.
self.query_start_time.take();
QueryEvent::Debug {
message: message.to_string(),
}
}
/// Halt the VM by clearing all goals and choices.
fn halt(&mut self) -> QueryEvent {
self.log(LogLevel::Trace, || "HALT", &[]);
self.goals.clear();
self.choices.clear();
QueryEvent::Done { result: true }
}
/// Comparison operator that essentially performs partial unification.
#[allow(clippy::many_single_char_names)]
fn isa(&mut self, left: &Term, right: &Term) -> PolarResult<()> {
self.log(
LogLevel::Trace,
|| format!("MATCHES: {} matches {}", left, right),
&[left, right],
);
match (left.value(), right.value()) {
(_, Value::Dictionary(_)) => todo!("make this case unreachable"),
(Value::Expression(_), _) | (_, Value::Expression(_)) => {
unreachable!("encountered bare expression")
}
_ if self.kb.read().unwrap().is_union(left) => {
// A union (currently) only matches itself.
//
// TODO(gj): when we have unions beyond `Actor` and `Resource`, we'll need to be
// smarter about this check since UnionA is more specific than UnionB if UnionA is
// a member of UnionB.
let unions_match = (left.is_actor_union() && right.is_actor_union())
|| (left.is_resource_union() && right.is_resource_union());
if !unions_match {
return self.push_goal(Goal::Backtrack);
}
}
_ if self.kb.read().unwrap().is_union(right) => self.isa_union(left, right)?,
// TODO(gj): (Var, Rest) + (Rest, Var) cases might be unreachable.
(Value::Variable(l), Value::Variable(r))
| (Value::Variable(l), Value::RestVariable(r))
| (Value::RestVariable(l), Value::Variable(r))
| (Value::RestVariable(l), Value::RestVariable(r)) => {
// Two variables.
match (self.variable_state(l), self.variable_state(r)) {
(VariableState::Bound(x), _) => self.push_goal(Goal::Isa {
left: x,
right: right.clone(),
})?,
(_, VariableState::Bound(y)) => self.push_goal(Goal::Isa {
left: left.clone(),
right: y,
})?,
(_, _) => self.add_constraint(&term!(op!(Isa, left.clone(), right.clone())))?,
}
}
(Value::Variable(l), _) | (Value::RestVariable(l), _) => match self.variable_state(l) {
VariableState::Bound(x) => self.push_goal(Goal::Isa {
left: x,
right: right.clone(),
})?,
_ => self.isa_expr(left, right)?,
},
(_, Value::Variable(r)) | (_, Value::RestVariable(r)) => match self.variable_state(r) {
VariableState::Bound(y) => self.push_goal(Goal::Isa {
left: left.clone(),
right: y,
})?,
_ => self.push_goal(Goal::Unify {
left: left.clone(),
right: right.clone(),
})?,
},
(Value::List(left), Value::List(right)) => {
self.unify_lists(left, right, |(left, right)| Goal::Isa {
left: left.clone(),
right: right.clone(),
})?;
}
(Value::Dictionary(left), Value::Pattern(Pattern::Dictionary(right))) => {
// Check that the left is more specific than the right.
let left_fields: HashSet<&Symbol> = left.fields.keys().collect();
let right_fields: HashSet<&Symbol> = right.fields.keys().collect();
if !right_fields.is_subset(&left_fields) {
return self.push_goal(Goal::Backtrack);
}
// For each field on the right, isa its value against the corresponding value on
// the left.
for (k, v) in right.fields.iter() {
let left = left
.fields
.get(k)
.expect("left fields should be a superset of right fields")
.clone();
self.push_goal(Goal::Isa {
left,
right: v.clone(),
})?;
}
}
(_, Value::Pattern(Pattern::Dictionary(right))) => {
// For each field in the dict, look up the corresponding field on the instance and
// then isa them.
for (field, right_value) in right.fields.iter() {
// Generate symbol for the lookup result and leave the variable unbound, so that unification with the result does not fail.
// Unification with the lookup result happens in `fn external_call_result()`.
let answer = self.kb.read().unwrap().gensym("isa_value");
let call_id = self.new_call_id(&answer);
let lookup = Goal::LookupExternal {
instance: left.clone(),
call_id,
field: right_value.clone_with_value(Value::String(field.0.clone())),
};
let isa = Goal::Isa {
left: Term::from(answer),
right: right_value.clone(),
};
self.append_goals(vec![lookup, isa])?;
}
}
(_, Value::Pattern(Pattern::Instance(right_literal))) => {
// Check fields
self.push_goal(Goal::Isa {
left: left.clone(),
right: right.clone_with_value(Value::Pattern(Pattern::Dictionary(
right_literal.fields.clone(),
))),
})?;
// attempt an in-core IsA check if we have the necessary
// class_id information
if let Value::ExternalInstance(ExternalInstance {
class_id: Some(class_id),
..
}) = *left.value()
{
let isa = {
let kb = self.kb.read().unwrap();
let right_id = kb
.get_class_id_for_symbol(&right_literal.tag)
.expect("no class ID for symbol");
let left_symbol = kb
.get_symbol_for_class_id(&class_id)
.expect("no symbol for class ID");
if let Some(mro) = kb.mro.get(left_symbol) {
mro.contains(right_id)
} else {
false
}
};
if !isa {
self.push_goal(Goal::Backtrack)?;
}
// default to IsaExternal when no `class_id` information is available
} else {
// Check class
self.push_goal(Goal::IsaExternal {
instance: left.clone(),
literal: right_literal.clone(),
})?;
}
}
// Default case: x isa y if x = y.
_ => self.push_goal(Goal::Unify {
left: left.clone(),
right: right.clone(),
})?,
}
Ok(())
}
fn get_names(&self, s: &Symbol) -> HashSet<Symbol> {
let cycles = self
.binding_manager
.get_constraints(s)
.constraints()
.into_iter()
.filter_map(|con| match con.operator {
Operator::Unify | Operator::Eq => {
if let (Ok(l), Ok(r)) = (con.args[0].as_symbol(), con.args[1].as_symbol()) {
Some((l.clone(), r.clone()))
} else {
None
}
}
_ => None,
});
partition_equivs(cycles)
.into_iter()
.find(|c| c.contains(s))
.unwrap_or_else(|| {
let mut hs = HashSet::with_capacity(1);
hs.insert(s.clone());
hs
})
}
fn isa_expr(&mut self, left: &Term, right: &Term) -> PolarResult<()> {
match right.value() {
Value::Pattern(Pattern::Dictionary(fields)) => {
// Produce a constraint like left.field = value
let to_unify = |(field, value): (&Symbol, &Term)| -> Term {
let value = self.deref(value);
let field = right.clone_with_value(value!(field.0.as_ref()));
let left = left.clone_with_value(value!(op!(Dot, left.clone(), field)));
term!(op!(Unify, left, value))
};
let constraints = fields.fields.iter().rev().map(to_unify).collect::<Vec<_>>();
for op in constraints {
self.add_constraint(&op)?;
}
}
Value::Pattern(Pattern::Instance(InstanceLiteral { fields, tag })) => {
// TODO(gj): assert that a simplified expression contains at most 1 unification
// involving a particular variable.
// TODO(gj): Ensure `op!(And) matches X{}` doesn't die after these changes.
let var = left.as_symbol()?;
// Get the existing partial on the LHS variable.
let partial = self.binding_manager.get_constraints(var);
let names = self.get_names(var);
let output = names.clone();
let partial = partial.into();
let (simplified, _) = simplify_partial(var, partial, output, false);
let simplified = simplified.as_expression()?;
// TODO (dhatch): what if there is more than one var = dot_op constraint?
// What if the one there is is in a not, or an or, or something
let lhss_of_matches = simplified
.constraints()
.into_iter()
.filter_map(|c| {
// If the simplified partial includes a constraint of form:
// `v = dot_op`, `dot_op = v`, or `v in dot_op`
// and the receiver of the dot operation is either
// `var` or an alias thereof, use the dot op as the LHS of the matches.
if c.operator != Operator::Unify && c.operator != Operator::In {
None
} else if matches!(c.args[0].as_symbol(), Ok(s) if names.contains(s)) &&
matches!(c.args[1].as_expression(), Ok(o) if o.operator == Operator::Dot) {
Some(c.args[1].clone())
} else if c.operator == Operator::Unify && matches!(c.args[1].as_symbol(), Ok(s) if names.contains(s)) &&
// only look for var on the RHS of a unfication (i.e. not on the RHS of an `in`)
matches!(c.args[0].as_expression(), Ok(o) if o.operator == Operator::Dot) {
Some(c.args[0].clone())
} else {
None
}
})
.chain(std::iter::once(left.clone()));
// Construct field-less matches operation.
let tag_pattern = right.clone_with_value(value!(pattern!(instance!(tag.clone()))));
let type_constraint = op!(Isa, left.clone(), tag_pattern);
let new_matcheses =
lhss_of_matches.map(|lhs_of_matches| op!(Isa, lhs_of_matches, right.clone()));
let runnables = new_matcheses
.map(|new_matches| {
let runnable = Box::new(IsaConstraintCheck::new(
simplified.constraints(),
new_matches,
names.clone(),
));
Goal::Run { runnable }
})
.collect();
// Construct field constraints.
let field_constraints = fields.fields.iter().rev().map(|(f, v)| {
let v = self.deref(v);
let field = right.clone_with_value(value!(f.0.as_ref()));
let left = left.clone_with_value(value!(op!(Dot, left.clone(), field)));
op!(Unify, left, v)
});
let mut add_constraints = vec![type_constraint];
add_constraints.extend(field_constraints.into_iter());
// Run compatibility check.
self.choose_conditional(
runnables,
add_constraints
.into_iter()
.map(|op| Goal::AddConstraint { term: op.into() })
.collect(),
vec![Goal::CheckError, Goal::Backtrack],
)?;
}
// if the RHS isn't a pattern or a dictionary, we'll fall back to unifying
// this is not the _best_ behaviour, but it's what we've been doing
// previously
_ => self.push_goal(Goal::Unify {
left: left.clone(),
right: right.clone(),
})?,
}
Ok(())
}
/// To evaluate `left matches Union`, look up `Union`'s member classes and create a choicepoint
/// to check if `left` matches any of them.
fn isa_union(&mut self, left: &Term, union: &Term) -> PolarResult<()> {
let member_isas = {
let kb = self.kb.read().unwrap();
let members = kb.get_union_members(union).iter();
members
.map(|member| {
let tag = member.as_symbol().unwrap().0.as_str();
member.clone_with_value(value!(pattern!(instance!(tag))))
})
.map(|pattern| {
vec![Goal::Isa {
left: left.clone(),
right: pattern,
}]
})
.collect::<Vec<Goals>>()
};
self.choose(member_isas)
}
fn lookup(&mut self, dict: &Dictionary, field: &Term, value: &Term) -> PolarResult<()> {
let field = self.deref(field);
match field.value() {
Value::Variable(_) => {
let mut alternatives = vec![];
for (k, v) in &dict.fields {
let mut goals: Goals = vec![];
// attempt to unify dict key with field
// if `field` is bound, unification will only succeed for the matching key
// if `field` is unbound, unification will succeed for all keys
goals.push(Goal::Unify {
left: field.clone_with_value(Value::String(k.clone().0)),
right: field.clone(),
});
// attempt to unify dict value with result
goals.push(Goal::Unify {
left: v.clone(),
right: value.clone(),
});
alternatives.push(goals);
}
self.choose(alternatives)
}
Value::String(field) => {
if let Some(retrieved) = dict.fields.get(&Symbol(field.clone())) {
self.push_goal(Goal::Unify {
left: retrieved.clone(),
right: value.clone(),
})
} else {
self.push_goal(Goal::Backtrack)
}
}
v => self.type_error(
&field,
format!("cannot look up field {:?} on a dictionary", v),
),
}
}
/// Return an external call event to look up a field's value
/// in an external instance. Push a `Goal::LookupExternal` as
/// an alternative on the last choice point to poll for results.
fn lookup_external(
&mut self,
call_id: u64,
instance: &Term,
field: &Term,
) -> PolarResult<QueryEvent> {
let (field_name, args, kwargs): (
Symbol,
Option<Vec<Term>>,
Option<BTreeMap<Symbol, Term>>,
) = match self.deref(field).value() {
Value::Call(Call { name, args, kwargs }) => (
name.clone(),
Some(args.iter().map(|arg| self.deref(arg)).collect()),
kwargs.as_ref().map(|unwrapped| {
unwrapped
.iter()
.map(|(k, v)| (k.to_owned(), self.deref(v)))
.collect()
}),
),
Value::String(field) => (Symbol(field.clone()), None, None),
v => {
return self.type_error(
field,
format!("cannot look up field {:?} on an external instance", v),
)
}
};
// add an empty choice point; lookups return only one value
// but we'll want to cut if we get back nothing
self.push_choice(vec![])?;
self.log(
LogLevel::Trace,
|| {
let mut msg = format!("LOOKUP: {}.{}", instance, field_name);
msg.push('(');
let args = args
.clone()
.unwrap_or_else(Vec::new)
.into_iter()
.map(|a| a.to_string());
let kwargs = kwargs
.clone()
.unwrap_or_else(BTreeMap::new)
.into_iter()
.map(|(k, v)| format!("{}: {}", k, v));
msg.push_str(&args.chain(kwargs).collect::<Vec<_>>().join(", "));
msg.push(')');
msg
},
&[],
);
Ok(QueryEvent::ExternalCall {
call_id,
instance: self.deref(instance),
attribute: field_name,
args,
kwargs,
})
}
fn isa_external(
&mut self,
instance: &Term,
literal: &InstanceLiteral,
) -> PolarResult<QueryEvent> {
let (call_id, answer) = self.new_call_var("isa", false.into());
self.push_goal(Goal::Unify {
left: answer,
right: Term::from(true),
})?;
Ok(QueryEvent::ExternalIsa {
call_id,
instance: self.deref(instance),
class_tag: literal.tag.clone(),
})
}
fn next_external(&mut self, call_id: u64, iterable: &Term) -> PolarResult<QueryEvent> {
// add another choice point for the next result
self.push_choice(vec![vec![Goal::NextExternal {
call_id,
iterable: iterable.clone(),
}]])?;
Ok(QueryEvent::NextExternal {
call_id,
iterable: iterable.clone(),
})
}
fn make_external(&self, constructor: &Term, instance_id: u64) -> QueryEvent {
QueryEvent::MakeExternal {
instance_id,
constructor: self.deref(constructor),
}
}
fn check_error(&mut self) -> PolarResult<QueryEvent> {
if let Some(msg) = self.external_error.take() {
let term = match self.trace.last().map(|t| t.node.clone()) {
Some(Node::Term(t)) => Some(t),
_ => None,
};
let stack_trace = self.stack_trace();
Err(RuntimeError::Application {
msg,
stack_trace,
term,
}
.into())
} else {
Ok(QueryEvent::None)
}
}
/// Query for the provided term.
///
/// Uses the knowledge base to get an ordered list of rules.
/// Creates a choice point over each rule, where each alternative
/// consists of unifying the rule head with the arguments, then
/// querying for each body clause.
fn query(&mut self, term: &Term) -> PolarResult<QueryEvent> {
// - Print INFO event for queries for rules.
// - Print TRACE (a superset of INFO) event for all other queries.
// - We filter out single-element ANDs, which many rule bodies take the form of, to instead
// log only their inner operations for readibility|brevity reasons.
match &term.value() {
Value::Call(predicate) => {
self.log(
LogLevel::Info,
|| format!("QUERY RULE: {}", predicate),
&[term],
);
}
Value::Expression(Operation {
operator: Operator::And,
args,
}) if args.len() < 2 => (),
_ => {
self.log(LogLevel::Trace, || format!("QUERY: {}", term), &[term]);
}
};
self.queries.push(term.clone());
self.push_goal(Goal::PopQuery { term: term.clone() })?;
self.trace.push(Rc::new(Trace {
node: Node::Term(term.clone()),
children: vec![],
}));
match &term.value() {
Value::Call(predicate) => {
self.query_for_predicate(predicate.clone())?;
}
Value::Expression(_) => {
return self.query_for_operation(term);
}
Value::Variable(sym) => {
self.push_goal(
if let VariableState::Bound(val) = self.variable_state(sym) {
Goal::Query { term: val }
} else {
// variable was unbound
// apply a constraint to variable that it must be truthy
Goal::Unify {
left: term.clone(),
right: term!(true),
}
},
)?
}
Value::Boolean(value) => {
if !value {
// Backtrack if the boolean is false.
self.push_goal(Goal::Backtrack)?;
}
return Ok(QueryEvent::None);
}
_ => {
// everything else dies horribly and in pain
return self.type_error(
term,
format!(
"{} isn't something that is true or false so can't be a condition",
term
),
);
}
}
Ok(QueryEvent::None)
}
/// Select applicable rules for predicate.
/// Sort applicable rules by specificity.
/// Create a choice over the applicable rules.
fn query_for_predicate(&mut self, predicate: Call) -> PolarResult<()> {
if predicate.kwargs.is_some() {
return invalid_state(format!(
"query_for_predicate: unexpected kwargs: {}",
predicate
));
}
let goals = match self.kb.read().unwrap().get_generic_rule(&predicate.name) {
None => {
return Err(RuntimeError::QueryForUndefinedRule {
name: predicate.name.0.clone(),
}
.into())
}
Some(generic_rule) => {
if generic_rule.name != predicate.name {
return invalid_state(format!(
"query_for_predicate: different rule names: {} != {}",
generic_rule.name, predicate.name
));
}
// Pre-filter rules.
let args = predicate.args.iter().map(|t| self.deref(t)).collect();
let pre_filter = generic_rule.get_applicable_rules(&args);
self.polar_trace_mute = true;
// Filter rules by applicability.
vec![
Goal::TraceStackPush,
Goal::FilterRules {
applicable_rules: vec![],
unfiltered_rules: pre_filter,
args: predicate.args,
},
Goal::TraceStackPop,
]
}
};
self.append_goals(goals)
}
fn query_for_operation(&mut self, term: &Term) -> PolarResult<QueryEvent> {
let operation = term.as_expression().unwrap();
let mut args = operation.args.clone();
let wrong_arity = || invalid_state(format!("query_for_operation: wrong arity: {}", term));
match operation.operator {
Operator::And => {
// Query for each conjunct.
self.push_goal(Goal::TraceStackPop)?;
self.append_goals(args.into_iter().map(|term| Goal::Query { term }))?;
self.push_goal(Goal::TraceStackPush)?;
}
Operator::Or => {
// Make an alternative Query for each disjunct.
self.choose(args.into_iter().map(|term| vec![Goal::Query { term }]))?;
}
Operator::Not => {
// Query in a sub-VM and invert the results.
if args.len() != 1 {
return wrong_arity();
}
let term = args.pop().unwrap();
let add_constraints = Rc::new(RefCell::new(Bindings::new()));
let inverter = Box::new(Inverter::new(
self,
vec![Goal::Query { term }],
add_constraints.clone(),
self.bsp(),
));
self.choose_conditional(
vec![Goal::Run { runnable: inverter }],
vec![Goal::AddConstraintsBatch { add_constraints }],
vec![Goal::Backtrack],
)?;
}
Operator::Assign => {
if args.len() != 2 {
return wrong_arity();
}
let right = args.pop().unwrap();
let left = args.pop().unwrap();
match (left.value(), right.value()) {
(Value::Variable(var), _) => match self.variable_state(var) {
VariableState::Unbound => {
self.push_goal(Goal::Unify { left, right })?;
}
_ => {
return self.type_error(
&left,
format!(
"Can only assign to unbound variables, {} is not unbound.",
var
),
);
}
},
_ => return self.type_error(&left, format!("Cannot assign to type {}.", left)),
}
}
Operator::Unify => {
// Push a `Unify` goal
if args.len() != 2 {
return wrong_arity();
}
let right = args.pop().unwrap();
let left = args.pop().unwrap();
self.push_goal(Goal::Unify { left, right })?
}
Operator::Dot => {
return self.query_op_helper(term, Self::dot_op_helper, false, false);
}
Operator::Lt
| Operator::Gt
| Operator::Leq
| Operator::Geq
| Operator::Eq
| Operator::Neq => {
return self.query_op_helper(term, Self::comparison_op_helper, true, true);
}
Operator::Add
| Operator::Sub
| Operator::Mul
| Operator::Div
| Operator::Mod
| Operator::Rem => {
return self.query_op_helper(term, Self::arithmetic_op_helper, true, true);
}
Operator::In => {
return self.query_op_helper(term, Self::in_op_helper, false, true);
}
Operator::Debug => {
let message = self.debugger.break_msg(self).unwrap_or_else(|| {
format!(
"debug({})",
args.iter()
.map(|arg| self.deref(arg).to_string())
.collect::<Vec<_>>()
.join(", ")
)
});
self.push_goal(Goal::Debug { message })?;
}
Operator::Print => {
self.print(
&args
.iter()
.map(|arg| self.deref(arg).to_string())
.collect::<Vec<_>>()
.join(", "),
);
}
Operator::New => {
if args.len() != 2 {
return wrong_arity();
}
let result = args.pop().unwrap();
result.as_symbol()?; // Ensure `result` is a variable.
let constructor = args.pop().unwrap();
let instance_id = self.new_id();
let class = &constructor.as_call()?.name;
let class_repr = if self.kb().is_constant(class) {
Some(class.0.clone())
} else {
None
};
let instance =
constructor.clone_with_value(Value::ExternalInstance(ExternalInstance {
instance_id,
constructor: Some(constructor.clone()),
repr: Some(constructor.to_string()),
class_repr,
class_id: None,
}));
// A goal is used here in case the result is already bound to some external
// instance.
self.append_goals(vec![
Goal::Unify {
left: result,
right: instance,
},
Goal::MakeExternal {
instance_id,
constructor,
},
])?;
}
Operator::Cut => {
if self.query_contains_partial {
return unsupported("cannot use cut with partial evaluation", term);
}
// Remove all choices created before this cut that are in the
// current rule body.
let mut choice_index = self.choices.len();
for choice in self.choices.iter().rev() {
// Comparison excludes the rule body & cut operator (the last two elements of self.queries)
let prefix = &self.queries[..(self.queries.len() - 2)];
if choice.queries.starts_with(prefix) {
// If the choice has the same query stack as the current
// query stack, remove it.
choice_index -= 1;
} else {
break;
}
}
self.push_goal(Goal::Cut { choice_index })?;
}
Operator::Isa => {
// TODO (dhatch): Use query op helper.
if args.len() != 2 {
return wrong_arity();
}
let right = args.pop().unwrap();
let left = args.pop().unwrap();
self.push_goal(Goal::Isa { left, right })?
}
Operator::ForAll => {
if args.len() != 2 {
return wrong_arity();
}
let action = args.pop().unwrap();
let condition = args.pop().unwrap();
// For all is implemented as !(condition, !action).
let op = Operation {
operator: Operator::Not,
args: vec![term.clone_with_value(Value::Expression(Operation {
operator: Operator::And,
args: vec![
condition,
term.clone_with_value(Value::Expression(Operation {
operator: Operator::Not,
args: vec![action],
})),
],
}))],
};
let double_negation = term.clone_with_value(Value::Expression(op));
self.push_goal(Goal::Query {
term: double_negation,
})?;
}
}
Ok(QueryEvent::None)
}
/// Handle variables & constraints as arguments to various operations.
/// Calls the `eval` method to handle ground terms.
///
/// Arguments:
///
/// - handle_unbound_left_var: If set to `false`, allow `eval` to handle
/// operations with an unbound left variable, instead of adding a constraint.
/// Some operations, like `In`, emit new goals or choice points when the left
/// operand is a variable.
/// - handle_unbound_right_var: Same as above but for the RHS. `Dot` uses this.
#[allow(clippy::many_single_char_names)]
fn query_op_helper<F>(
&mut self,
term: &Term,
eval: F,
handle_unbound_left_var: bool,
handle_unbound_right_var: bool,
) -> PolarResult<QueryEvent>
where
F: Fn(&mut Self, &Term) -> PolarResult<QueryEvent>,
{
let Operation { operator: op, args } = term.as_expression().unwrap();
let mut args = args.clone();
if args.len() < 2 {
return invalid_state(format!("query_op_helper: wrong arity: {}", term));
}
let left = &args[0];
let right = &args[1];
match (left.value(), right.value()) {
// We may be querying a partial from the simplifier, which can contain
// embedded binary (as opposed to ternary) dot operations. In that case
// we introduce a new variable, unify it with the dot lookup, then query
// against the variable instead.
//
// TODO(gw) take these out after the simplifier/inverter work better ...
//
// dot on the left
(
Value::Expression(Operation {
operator: Operator::Dot,
args,
}),
_,
) if args.len() == 2 => {
let var = term!(self.kb().gensym("rwdot"));
let val = Value::Expression(Operation {
operator: *op,
args: vec![var.clone(), right.clone()],
});
let term = term.clone_with_value(val);
self.push_goal(Goal::Query { term })?;
self.push_goal(Goal::Unify {
left: left.clone(),
right: var,
})?;
return Ok(QueryEvent::None);
}
// dot on the right
(
_,
Value::Expression(Operation {
operator: Operator::Dot,
args,
}),
) if args.len() == 2 => {
let var = term!(self.kb().gensym("rwdot"));
let val = Value::Expression(Operation {
operator: *op,
args: vec![left.clone(), var.clone()],
});
let term = term.clone_with_value(val);
self.push_goal(Goal::Query { term })?;
self.push_goal(Goal::Unify {
left: var,
right: right.clone(),
})?;
return Ok(QueryEvent::None);
}
// otherwise this isn't allowed.
(Value::Expression(_), _)
| (_, Value::Expression(_))
| (Value::RestVariable(_), _)
| (_, Value::RestVariable(_)) => {
return invalid_state(format!("invalid query: {}", term));
}
_ => {}
};
if let Value::Variable(r) = right.value() {
if let VariableState::Bound(x) = self.variable_state(r) {
args[1] = x;
self.push_goal(Goal::Query {
term: term.clone_with_value(Value::Expression(Operation {
operator: *op,
args,
})),
})?;
return Ok(QueryEvent::None);
} else if !handle_unbound_right_var && left.as_symbol().is_err() {
return eval(self, term);
}
}
if let Value::Variable(l) = left.value() {
if let VariableState::Bound(x) = self.variable_state(l) {
args[0] = x;
self.push_goal(Goal::Query {
term: term.clone_with_value(Value::Expression(Operation {
operator: *op,
args,
})),
})?;
return Ok(QueryEvent::None);
} else if !handle_unbound_left_var && right.as_symbol().is_err() {
return eval(self, term);
}
}
if left.as_symbol().is_ok() || right.as_symbol().is_ok() {
self.add_constraint(term)?;
return Ok(QueryEvent::None);
}
eval(self, term)
}
/// Evaluate comparison operations.
fn comparison_op_helper(&mut self, term: &Term) -> PolarResult<QueryEvent> {
let Operation { operator: op, args } = term.as_expression().unwrap();
if args.len() != 2 {
return invalid_state(format!("comparison_op_helper: wrong arity: {}", term));
}
let left = &args[0];
let right = &args[1];
match (left.value(), right.value()) {
(Value::ExternalInstance(_), _) | (_, Value::ExternalInstance(_)) => {
// Generate a symbol for the external result and bind to `false` (default).
let (call_id, answer) = self.new_call_var("external_op_result", false.into());
// Check that the external result is `true` when we return.
self.push_goal(Goal::Unify {
left: answer,
right: Term::from(true),
})?;
// Emit an event for the external operation.
Ok(QueryEvent::ExternalOp {
call_id,
operator: *op,
args: vec![left.clone(), right.clone()],
})
}
_ => {
if !compare(*op, left, right, Some(term))? {
self.push_goal(Goal::Backtrack)?;
}
Ok(QueryEvent::None)
}
}
}
// TODO(ap, dhatch): Rewrite 3-arg arithmetic ops as 2-arg + unify,
// like we do for dots; e.g., `+(a, b, c)` → `c = +(a, b)`.
/// Evaluate arithmetic operations.
fn arithmetic_op_helper(&mut self, term: &Term) -> PolarResult<QueryEvent> {
let Operation { operator: op, args } = term.as_expression().unwrap();
if args.len() != 3 {
return invalid_state(format!("arithmetic_op_helper: wrong arity: {}", term));
}
let left = &args[0];
let right = &args[1];
let result = &args[2];
result.as_symbol()?; // Ensure `result` is a variable.
match (left.value(), right.value()) {
(Value::Number(left), Value::Number(right)) => {
if let Some(answer) = match op {
Operator::Add => *left + *right,
Operator::Sub => *left - *right,
Operator::Mul => *left * *right,
Operator::Div => *left / *right,
Operator::Mod => (*left).modulo(*right),
Operator::Rem => *left % *right,
_ => return unsupported(format!("numeric operation {}", op), term),
} {
self.push_goal(Goal::Unify {
left: term.clone_with_value(Value::Number(answer)),
right: result.clone(),
})?;
Ok(QueryEvent::None)
} else {
Err(RuntimeError::ArithmeticError { term: term.clone() }.into())
}
}
(_, _) => unsupported(format!("unsupported arithmetic operands: {}", term), term),
}
}
/// Push appropriate goals for lookups on dictionaries and instances.
fn dot_op_helper(&mut self, term: &Term) -> PolarResult<QueryEvent> {
let Operation { args, .. } = term.as_expression().unwrap();
if args.len() != 3 {
return invalid_state(format!("dot_op_helper: wrong arity: {}", term));
}
let mut args = args.clone();
let object = &args[0];
let field = &args[1];
let value = &args[2];
match object.value() {
// Push a `Lookup` goal for simple field lookups on dictionaries.
Value::Dictionary(dict)
if matches!(field.value(), Value::String(_) | Value::Variable(_)) =>
{
self.push_goal(Goal::Lookup {
dict: dict.clone(),
field: field.clone(),
value: args.remove(2),
})?
}
// Push an `ExternalLookup` goal for external instances and built-ins.
Value::Dictionary(_)
| Value::ExternalInstance(_)
| Value::List(_)
| Value::Number(_)
| Value::String(_) => {
let answer = self.kb.read().unwrap().gensym("lookup_value");
let call_id = self.new_call_id(&answer);
self.append_goals(vec![
Goal::LookupExternal {
call_id,
field: field.clone(),
instance: object.clone(),
},
Goal::CheckError,
Goal::Unify {
left: value.clone(),
right: Term::from(answer),
},
])?;
}
Value::Variable(v) => {
if matches!(field.value(), Value::Call(_)) {
return unsupported(
format!("cannot call method on unbound variable {}", v),
object,
);
}
// Translate `.(object, field, value)` → `value = .(object, field)`.
let dot2 = op!(Dot, object.clone(), field.clone());
let value = self.deref(value);
let term = Term::from(op!(Unify, value, dot2.into()));
self.add_constraint(&term)?;
}
_ => {
return self.type_error(
object,
format!(
"can only perform lookups on dicts and instances, this is {}",
object
),
)
}
}
Ok(QueryEvent::None)
}
fn in_op_helper(&mut self, term: &Term) -> PolarResult<QueryEvent> {
let Operation { args, .. } = term.as_expression().unwrap();
if args.len() != 2 {
return invalid_state(format!("in_op_helper: wrong arity: {}", term));
}
let item = &args[0];
let iterable = &args[1];
let item_is_ground = item.is_ground();
match iterable.value() {
// Unify item with each element of the list, skipping non-matching ground terms.
Value::List(terms) => self.choose(
terms
.iter()
.filter(|term| {
!item_is_ground || !term.is_ground() || term.value() == item.value()
})
.map(|term| match term.value() {
Value::RestVariable(v) => {
let term = op!(In, item.clone(), Term::from(v.clone())).into();
vec![Goal::Query { term }]
}
_ => vec![Goal::Unify {
left: item.clone(),
right: term.clone(),
}],
})
.collect::<Vec<Goals>>(),
)?,
// Unify item with each (k, v) pair of the dict, skipping non-matching ground terms.
Value::Dictionary(dict) => self.choose(
dict.fields
.iter()
.map(|(k, v)| {
iterable.clone_with_value(Value::List(vec![
v.clone_with_value(Value::String(k.0.clone())),
v.clone(),
]))
})
.filter(|term| {
!item_is_ground || !term.is_ground() || term.value() == item.value()
})
.map(|term| {
vec![Goal::Unify {
left: item.clone(),
right: term,
}]
})
.collect::<Vec<Goals>>(),
)?,
// Unify item with each element of the string
// FIXME (gw): this seems strange, wouldn't a substring search make more sense?
Value::String(s) => self.choose(
s.chars()
.map(|c| c.to_string())
.map(Value::String)
.filter(|c| !item_is_ground || c == item.value())
.map(|c| {
vec![Goal::Unify {
left: item.clone(),
right: iterable.clone_with_value(c),
}]
})
.collect::<Vec<Goals>>(),
)?,
// Push an `ExternalLookup` goal for external instances
Value::ExternalInstance(_) => {
// Generate symbol for next result and leave the variable unbound, so that unification with the result does not fail
// Unification of the `next_sym` variable with the result of `NextExternal` happens in `fn external_call_result()`
// `external_call_result` is the handler for results from both `LookupExternal` and `NextExternal`, so neither can bind the
// call ID variable to `false`.
let next_sym = self.kb.read().unwrap().gensym("next_value");
let call_id = self.new_call_id(&next_sym);
// append unify goal to be evaluated after
// next result is fetched
self.append_goals(vec![
Goal::NextExternal {
call_id,
iterable: self.deref(iterable),
},
Goal::Unify {
left: item.clone(),
right: Term::from(next_sym),
},
])?;
}
_ => {
return self.type_error(
iterable,
format!(
"can only use `in` on an iterable value, this is {:?}",
iterable.value()
),
);
}
}
Ok(QueryEvent::None)
}
/// Unify `left` and `right` terms.
///
/// Outcomes of a unification are:
/// - Successful unification => bind zero or more variables to values
/// - Recursive unification => more `Unify` goals are pushed onto the stack
/// - Failure => backtrack
fn unify(&mut self, left: &Term, right: &Term) -> PolarResult<()> {
match (left.value(), right.value()) {
(Value::Expression(op), other) | (other, Value::Expression(op)) => {
match op {
// this branch handles dot ops that were rewritten for inclusion
// in a partial by Vm::dot_op_helper(), but then queried again after
// the partial was bound by Vm::bind().
Operation {
operator: Operator::Dot,
args,
} if args.len() == 2 => {
let term = Term::from(op!(
Dot,
args[0].clone(),
args[1].clone(),
Term::from(other.clone())
));
self.push_goal(Goal::Query { term })?
}
// otherwise this should never happen.
_ => {
return self.type_error(
left,
format!("cannot unify expressions directly `{}` = `{}`", left, right),
)
}
}
}
(Value::Pattern(_), _) | (_, Value::Pattern(_)) => {
return self.type_error(
left,
format!("cannot unify patterns directly `{}` = `{}`", left, right),
);
}
// Unify two variables.
// TODO(gj): (Var, Rest) + (Rest, Var) cases might be unreachable.
(Value::Variable(l), Value::Variable(r))
| (Value::Variable(l), Value::RestVariable(r))
| (Value::RestVariable(l), Value::Variable(r))
| (Value::RestVariable(l), Value::RestVariable(r)) => {
// FIXME(gw):
// if the variables are the same the unification succeeds, so
// we don't need to do anything. but this causes an inconsistency
// with NaN where `nan = nan` is false but `x = nan and x = x` is
// true. if we really want to keep the NaN equality semantics
// maybe we can have `nan = nan` but not `nan == nan`?
if l != r {
match (self.variable_state(l), self.variable_state(r)) {
(VariableState::Bound(x), VariableState::Bound(y)) => {
// Both variables are bound. Unify their values.
self.push_goal(Goal::Unify { left: x, right: y })?;
}
_ => {
// At least one variable is unbound. Bind it.
if self.bind(l, right.clone()).is_err() {
self.push_goal(Goal::Backtrack)?;
}
}
}
}
}
// FIXME(gw): i think we might actually want this, see the comment
// above about unifying variables.
// (Value::Number(Numeric::Float(a)),
// Value::Number(Numeric::Float(b)))
// if a.is_nan() && b.is_nan() => (),
// Unify/bind a variable on the left with/to the term on the right.
(Value::Variable(var), _) | (Value::RestVariable(var), _) => {
let right = right.clone();
match self.variable_state(var) {
VariableState::Bound(value) => {
self.push_goal(Goal::Unify { left: value, right })?;
}
_ => {
if self.bind(var, right).is_err() {
self.push_goal(Goal::Backtrack)?;
}
}
}
}
// Unify/bind a variable on the right with/to the term on the left.
(_, Value::Variable(var)) | (_, Value::RestVariable(var)) => {
let left = left.clone();
match self.variable_state(var) {
VariableState::Bound(value) => {
self.push_goal(Goal::Unify { left, right: value })?;
}
_ => {
if self.bind(var, left).is_err() {
self.push_goal(Goal::Backtrack)?;
}
}
}
}
// Unify predicates like unifying heads
(Value::Call(left), Value::Call(right)) => {
if left.kwargs.is_some() || right.kwargs.is_some() {
// Handled in the parser.
return invalid_state("unify: unexpected kwargs".to_string());
}
if left.name == right.name && left.args.len() == right.args.len() {
self.append_goals(left.args.iter().zip(right.args.iter()).map(
|(left, right)| Goal::Unify {
left: left.clone(),
right: right.clone(),
},
))?;
} else {
self.push_goal(Goal::Backtrack)?
}
}
// Unify lists by recursively unifying their elements.
(Value::List(l), Value::List(r)) => self.unify_lists(l, r, |(l, r)| Goal::Unify {
left: l.clone(),
right: r.clone(),
})?,
(Value::Dictionary(left), Value::Dictionary(right)) => {
// Check that the set of keys are the same.
let left_fields: HashSet<&Symbol> = left.fields.keys().collect();
let right_fields: HashSet<&Symbol> = right.fields.keys().collect();
if left_fields != right_fields {
self.push_goal(Goal::Backtrack)?;
return Ok(());
}
// For each value, push a unify goal.
for (k, v) in left.fields.iter() {
let right = right.fields.get(k).expect("fields should be equal").clone();
self.push_goal(Goal::Unify {
left: v.clone(),
right,
})?
}
}
// Unify integers by value.
(Value::Number(left), Value::Number(right)) => {
if left != right {
self.push_goal(Goal::Backtrack)?;
}
}
(Value::String(left), Value::String(right)) => {
if left != right {
self.push_goal(Goal::Backtrack)?;
}
}
// Unify bools by value.
(Value::Boolean(left), Value::Boolean(right)) => {
if left != right {
self.push_goal(Goal::Backtrack)?;
}
}
(
Value::ExternalInstance(ExternalInstance {
instance_id: left, ..
}),
Value::ExternalInstance(ExternalInstance {
instance_id: right, ..
}),
) if left == right => (),
// If either operand is an external instance, let the host
// compare them for equality. This handles unification between
// "equivalent" host and native types transparently.
(Value::ExternalInstance(_), _) | (_, Value::ExternalInstance(_)) => {
self.push_goal(Goal::Query {
term: Term::from(Operation {
operator: Operator::Eq,
args: vec![left.clone(), right.clone()],
}),
})?
}
// Anything else fails.
(_, _) => self.push_goal(Goal::Backtrack)?,
}
Ok(())
}
/// "Unify" two lists element-wise, respecting rest-variables.
/// Used by both `unify` and `isa`; hence the third argument,
/// a closure that builds sub-goals.
#[allow(clippy::ptr_arg)]
fn unify_lists<F>(&mut self, left: &TermList, right: &TermList, unify: F) -> PolarResult<()>
where
F: FnMut((&Term, &Term)) -> Goal,
{
if has_rest_var(left) && has_rest_var(right) {
self.unify_two_lists_with_rest(left, right, unify)
} else if has_rest_var(left) {
self.unify_rest_list_with_list(left, right, unify)
} else if has_rest_var(right) {
self.unify_rest_list_with_list(right, left, unify)
} else if left.len() == right.len() {
// No rest-variables; unify element-wise.
self.append_goals(left.iter().zip(right).map(unify))
} else {
self.push_goal(Goal::Backtrack)
}
}
/// Unify two list that end with a rest-variable with eachother.
/// A helper method for `unify_lists`.
#[allow(clippy::ptr_arg)]
fn unify_two_lists_with_rest<F>(
&mut self,
rest_list_a: &TermList,
rest_list_b: &TermList,
mut unify: F,
) -> PolarResult<()>
where
F: FnMut((&Term, &Term)) -> Goal,
{
if rest_list_a.len() == rest_list_b.len() {
let n = rest_list_b.len() - 1;
let rest = unify((&rest_list_b[n].clone(), &rest_list_a[n].clone()));
self.append_goals(
rest_list_b
.iter()
.take(n)
.zip(rest_list_a)
.map(unify)
.chain(vec![rest]),
)
} else {
let (shorter, longer) = {
if rest_list_a.len() < rest_list_b.len() {
(rest_list_a, rest_list_b)
} else {
(rest_list_b, rest_list_a)
}
};
let n = shorter.len() - 1;
let rest = unify((&shorter[n].clone(), &Term::from(longer[n..].to_vec())));
self.append_goals(
shorter
.iter()
.take(n)
.zip(longer)
.map(unify)
.chain(vec![rest]),
)
}
}
/// Unify a list that ends with a rest-variable with another that doesn't.
/// A helper method for `unify_lists`.
#[allow(clippy::ptr_arg)]
fn unify_rest_list_with_list<F>(
&mut self,
rest_list: &TermList,
list: &TermList,
mut unify: F,
) -> PolarResult<()>
where
F: FnMut((&Term, &Term)) -> Goal,
{
let n = rest_list.len() - 1;
if list.len() >= n {
let rest = unify((&rest_list[n].clone(), &Term::from(list[n..].to_vec())));
self.append_goals(
rest_list
.iter()
.take(n)
.zip(list)
.map(unify)
.chain(vec![rest]),
)
} else {
self.push_goal(Goal::Backtrack)
}
}
/// Filter rules to just those applicable to a list of arguments,
/// then sort them by specificity.
#[allow(clippy::ptr_arg)]
fn filter_rules(
&mut self,
applicable_rules: &Rules,
unfiltered_rules: &Rules,
args: &TermList,
) -> PolarResult<()> {
if unfiltered_rules.is_empty() {
// The rules have been filtered. Sort them.
if applicable_rules.is_empty() {
self.log(LogLevel::Info, || "No matching rules found", &[]);
}
self.push_goal(Goal::SortRules {
rules: applicable_rules.iter().rev().cloned().collect(),
args: args.clone(),
outer: 1,
inner: 1,
})
} else {
// Check one rule for applicability.
let mut unfiltered_rules = unfiltered_rules.clone();
let rule = unfiltered_rules.pop().unwrap();
let inapplicable = Goal::FilterRules {
args: args.clone(),
applicable_rules: applicable_rules.clone(),
unfiltered_rules: unfiltered_rules.clone(),
};
if rule.params.len() != args.len() {
return self.push_goal(inapplicable); // wrong arity
}
let mut applicable_rules = applicable_rules.clone();
applicable_rules.push(rule.clone());
let applicable = Goal::FilterRules {
args: args.clone(),
applicable_rules,
unfiltered_rules,
};
// The prefilter already checks applicability for ground rules.
if rule.is_ground() {
return self.push_goal(applicable);
}
// Rename the variables in the rule (but not the args).
// This avoids clashes between arg vars and rule vars.
let Rule { params, .. } = self.rename_rule_vars(&rule);
let mut check_applicability = vec![];
for (arg, param) in args.iter().zip(params.iter()) {
check_applicability.push(Goal::Unify {
left: arg.clone(),
right: param.parameter.clone(),
});
if let Some(specializer) = ¶m.specializer {
check_applicability.push(Goal::Isa {
left: arg.clone(),
right: specializer.clone(),
});
}
}
self.choose_conditional(check_applicability, vec![applicable], vec![inapplicable])?;
Ok(())
}
}
/// Sort a list of rules with respect to a list of arguments
/// using an explicit-state insertion sort.
///
/// We maintain two indices for the sort, `outer` and `inner`. The `outer` index tracks our
/// sorting progress. Every rule at or below `outer` is sorted; every rule above it is
/// unsorted. The `inner` index tracks our search through the sorted sublist for the correct
/// position of the candidate rule (the rule at the head of the unsorted portion of the
/// list).
#[allow(clippy::ptr_arg)]
fn sort_rules(
&mut self,
rules: &Rules,
args: &TermList,
outer: usize,
inner: usize,
) -> PolarResult<()> {
if rules.is_empty() {
return self.push_goal(Goal::Backtrack);
} else if outer > rules.len() {
return invalid_state("bad outer index".to_string());
} else if inner > rules.len() {
return invalid_state("bad inner index".to_string());
} else if inner > outer {
return invalid_state("bad insertion sort state".to_string());
}
let next_outer = Goal::SortRules {
rules: rules.clone(),
args: args.clone(),
outer: outer + 1,
inner: outer + 1,
};
// Because `outer` starts as `1`, if there is only one rule in the `Rules`, this check
// fails and we jump down to the evaluation of that lone rule.
if outer < rules.len() {
if inner > 0 {
let compare = Goal::IsMoreSpecific {
left: rules[inner].clone(),
right: rules[inner - 1].clone(),
args: args.clone(),
};
let mut rules = rules.clone();
rules.swap(inner - 1, inner);
let next_inner = Goal::SortRules {
rules,
outer,
inner: inner - 1,
args: args.clone(),
};
// If the comparison fails, break out of the inner loop.
// If the comparison succeeds, continue the inner loop with the swapped rules.
self.choose_conditional(vec![compare], vec![next_inner], vec![next_outer])?;
} else {
if inner != 0 {
return invalid_state("inner == 0".to_string());
}
self.push_goal(next_outer)?;
}
} else {
// We're done; the rules are sorted.
// Make alternatives for calling them.
self.polar_trace_mute = false;
self.log(
LogLevel::Info,
|| {
let mut rule_strs = "APPLICABLE_RULES:".to_owned();
for rule in rules {
let context = rule
.parsed_context()
.map_or_else(|| "".into(), Context::source_position);
rule_strs.push_str(&format!("\n {}{}", rule.head_as_string(), context));
}
rule_strs
},
&[],
);
let mut alternatives = Vec::with_capacity(rules.len());
for rule in rules.iter() {
let mut goals = Vec::with_capacity(2 * args.len() + 4);
goals.push(Goal::TraceRule {
trace: Rc::new(Trace {
node: Node::Rule(rule.clone()),
children: vec![],
}),
});
goals.push(Goal::TraceStackPush);
let Rule { body, params, .. } = self.rename_rule_vars(rule);
// Unify the arguments with the formal parameters.
for (arg, param) in args.iter().zip(params.iter()) {
goals.push(Goal::Unify {
left: arg.clone(),
right: param.parameter.clone(),
});
if let Some(specializer) = ¶m.specializer {
goals.push(Goal::Isa {
left: param.parameter.clone(),
right: specializer.clone(),
});
}
}
// Query for the body clauses.
goals.push(Goal::Query { term: body.clone() });
goals.push(Goal::TraceStackPop);
alternatives.push(goals)
}
// Choose the first alternative, and push a choice for the rest.
self.choose(alternatives)?;
}
Ok(())
}
/// Succeed if `left` is more specific than `right` with respect to `args`.
#[allow(clippy::ptr_arg)]
fn is_more_specific(&mut self, left: &Rule, right: &Rule, args: &TermList) -> PolarResult<()> {
let zipped = left.params.iter().zip(right.params.iter()).zip(args.iter());
for ((left_param, right_param), arg) in zipped {
match (&left_param.specializer, &right_param.specializer) {
// If both specs are unions, they have the same specificity regardless of whether
// they're the same or different unions.
//
// TODO(gj): when we have unions beyond `Actor` and `Resource`, we'll need to be
// smarter about this check since UnionA is more specific than UnionB if UnionA is
// a member of UnionB.
(Some(left_spec), Some(right_spec))
if self.kb.read().unwrap().is_union(left_spec)
&& self.kb.read().unwrap().is_union(right_spec) => {}
// If left is a union and right is not, left cannot be more specific, so we
// backtrack.
(Some(left_spec), Some(_)) if self.kb.read().unwrap().is_union(left_spec) => {
return self.push_goal(Goal::Backtrack)
}
// If right is a union and left is not, left IS more specific, so we return.
(Some(_), Some(right_spec)) if self.kb.read().unwrap().is_union(right_spec) => {
return Ok(())
}
(Some(left_spec), Some(right_spec)) => {
// If you find two non-equal specializers, that comparison determines the relative
// specificity of the two rules completely. As soon as you have two specializers
// that aren't the same and you can compare them and ask which one is more specific
// to the relevant argument, you're done.
if left_spec != right_spec {
let answer = self.kb.read().unwrap().gensym("is_subspecializer");
// Bind answer to false as a starting point in case is subspecializer doesn't
// bind any result.
// This is done here for safety to avoid a bug where `answer` is unbound by
// `IsSubspecializer` and the `Unify` Goal just assigns it to `true` instead
// of checking that is is equal to `true`.
self.bind(&answer, Term::from(false)).unwrap();
return self.append_goals(vec![
Goal::IsSubspecializer {
answer: answer.clone(),
left: left_spec.clone(),
right: right_spec.clone(),
arg: arg.clone(),
},
Goal::Unify {
left: Term::from(answer),
right: Term::from(true),
},
]);
}
}
// If the left rule has no specializer and the right does, it is NOT more specific,
// so we Backtrack (fail)
(None, Some(_)) => return self.push_goal(Goal::Backtrack),
// If the left rule has a specializer and the right does not, the left IS more specific,
// so we return
(Some(_), None) => return Ok(()),
// If neither has a specializer, neither is more specific, so we continue to the next argument.
(None, None) => (),
}
}
// Fail on any of the above branches that do not return
self.push_goal(Goal::Backtrack)
}
/// Determine if `left` is a more specific specializer ("subspecializer") than `right`
fn is_subspecializer(
&mut self,
answer: &Symbol,
left: &Term,
right: &Term,
arg: &Term,
) -> PolarResult<QueryEvent> {
let arg = self.deref(arg);
match (arg.value(), left.value(), right.value()) {
(
Value::ExternalInstance(instance),
Value::Pattern(Pattern::Instance(left_lit)),
Value::Pattern(Pattern::Instance(right_lit)),
) => {
let call_id = self.new_call_id(answer);
let instance_id = instance.instance_id;
if left_lit.tag == right_lit.tag
&& !(left_lit.fields.fields.is_empty() && right_lit.fields.fields.is_empty())
{
self.push_goal(Goal::IsSubspecializer {
answer: answer.clone(),
left: left.clone_with_value(Value::Pattern(Pattern::Dictionary(
left_lit.fields.clone(),
))),
right: right.clone_with_value(Value::Pattern(Pattern::Dictionary(
right_lit.fields.clone(),
))),
arg,
})?;
}
// check ordering based on the classes
Ok(QueryEvent::ExternalIsSubSpecializer {
call_id,
instance_id,
left_class_tag: left_lit.tag.clone(),
right_class_tag: right_lit.tag.clone(),
})
}
(
_,
Value::Pattern(Pattern::Dictionary(left)),
Value::Pattern(Pattern::Dictionary(right)),
) => {
let left_fields: HashSet<&Symbol> = left.fields.keys().collect();
let right_fields: HashSet<&Symbol> = right.fields.keys().collect();
// The dictionary with more fields is taken as more specific.
// The assumption here is that rules have already been filtered
// for applicability.
if left_fields.len() != right_fields.len() {
self.rebind_external_answer(
answer,
Term::from(right_fields.len() < left.fields.len()),
);
}
Ok(QueryEvent::None)
}
(_, Value::Pattern(Pattern::Instance(_)), Value::Pattern(Pattern::Dictionary(_))) => {
self.rebind_external_answer(answer, Term::from(true));
Ok(QueryEvent::None)
}
_ => {
self.rebind_external_answer(answer, Term::from(false));
Ok(QueryEvent::None)
}
}
}
pub fn term_source(&self, term: &Term, include_info: bool) -> String {
let source_info = term.parsed_context();
let mut source_string = if let Some(context) = source_info {
let chars = context.source.src.chars();
chars.take(context.right).skip(context.left).collect()
} else {
term.to_string()
};
if include_info {
if let Some(context) = source_info {
source_string += &context.source_position();
}
}
source_string
}
fn type_error<T>(&self, term: &Term, msg: String) -> PolarResult<T> {
Err(RuntimeError::TypeError {
msg,
stack_trace: self.stack_trace(),
term: term.clone(),
}
.into())
}
fn run_runnable(&mut self, runnable: Box<dyn Runnable>) -> PolarResult<QueryEvent> {
let (call_id, answer) = self.new_call_var("runnable_result", Value::Boolean(false));
self.push_goal(Goal::Unify {
left: answer,
right: Term::from(true),
})?;
Ok(QueryEvent::Run { runnable, call_id })
}
/// Handle an error coming from outside the vm.
pub fn external_error(&mut self, message: String) -> PolarResult<()> {
self.external_error = Some(message);
Ok(())
}
}
impl Runnable for PolarVirtualMachine {
/// Run the virtual machine. While there are goals on the stack,
/// pop them off and execute them one at a time until we have a
/// `QueryEvent` to return. May be called multiple times to restart
/// the machine.
fn run(&mut self, _: Option<&mut Counter>) -> PolarResult<QueryEvent> {
if self.query_start_time.is_none() {
#[cfg(not(target_arch = "wasm32"))]
let query_start_time = Some(std::time::Instant::now());
#[cfg(target_arch = "wasm32")]
let query_start_time = Some(js_sys::Date::now());
self.query_start_time = query_start_time;
}
if self.goals.is_empty() {
if self.choices.is_empty() {
return Ok(QueryEvent::Done { result: true });
} else {
self.backtrack()?;
}
}
while let Some(goal) = self.goals.pop() {
match self.next(goal.clone())? {
QueryEvent::None => (),
event => {
self.external_error = None;
return Ok(event);
}
}
self.maybe_break(DebugEvent::Goal(goal.clone()))?;
}
if self.tracing {
for t in &self.trace {
self.log(LogLevel::Trace, || format!("trace\n{}", t.draw(self)), &[]);
}
}
let trace = if self.tracing {
let trace = self.trace.first().cloned();
trace.map(|trace| TraceResult {
formatted: trace.draw(self),
trace,
})
} else {
None
};
let mut bindings = self.bindings(true);
if !self.inverting {
match simplify_bindings_opt(bindings, false) {
Ok(Some(bs)) => {
// simplification succeeds
bindings = bs;
}
Ok(None) => {
// incompatible bindings; simplification fails
// do not return result
return Ok(QueryEvent::None);
}
Err(RuntimeError::UnhandledPartial { term, ref var }) => {
// use the debugger to get the nicest possible version of this binding
let Binding(original_var_name, simplified) = get_binding_for_var(&var.0, self);
// TODO(gj): `t` is a partial constructed in the VM, so we don't have any
// source context for it. We make a best effort to track down some relevant
// context by walking `t` in search of the first piece of source context we
// find.
//
// For a future refactor, we might consider using the `Term::clone_with_value`
// API to preserve source context when initially binding a variable to an
// `Expression`.
fn try_to_add_context(t: &Term, simplified: Term) -> Term {
/// `GetSource` walks a term & returns the _1st_ piece of source info it finds.
struct GetSource {
term: Option<Term>,
}
impl Visitor for GetSource {
fn visit_term(&mut self, t: &Term) {
if self.term.is_none() {
if t.parsed_context().is_none() {
walk_term(self, t)
} else {
self.term = Some(t.clone())
}
}
}
}
let mut source_getter = GetSource { term: None };
source_getter.visit_term(t);
if let Some(term_with_context) = source_getter.term {
term_with_context.clone_with_value(simplified.value().clone())
} else {
simplified
}
}
// there was an unhandled partial in the bindings
// grab the context from the variable that was defined and
// set the context before returning
return Err(RuntimeError::UnhandledPartial {
term: try_to_add_context(&term, simplified),
var: original_var_name,
}
.into());
}
Err(e) => unreachable!("unexpected error: {}", e.to_string()),
}
bindings = bindings
.clone()
.into_iter()
.filter(|(var, _)| !var.is_temporary_var())
.map(|(var, value)| (var.clone(), sub_this(var, value)))
.collect();
}
self.log(
LogLevel::Info,
|| {
if bindings.is_empty() {
"RESULT: SUCCESS".to_string()
} else {
let mut out = "RESULT: {\n".to_string(); // open curly & newline
for (key, value) in &bindings {
out.push_str(&format!(" {}: {}\n", key, value)); // key-value pairs spaced w/ newlines
}
out.push('}'); // closing curly
out
}
},
&[],
);
Ok(QueryEvent::Result { bindings, trace })
}
fn handle_error(&mut self, error: PolarError) -> PolarResult<QueryEvent> {
// if we pushed a debug goal, push an error goal underneath it.
if self.maybe_break(DebugEvent::Error(error.clone()))? {
let g = self.goals.pop().unwrap();
self.push_goal(Goal::Error { error })?;
self.goals.push(g);
Ok(QueryEvent::None)
} else {
Err(error)
}
}
/// Handle response to a predicate posed to the application, e.g., `ExternalIsa`.
fn external_question_result(&mut self, call_id: u64, answer: bool) -> PolarResult<()> {
let var = self.call_id_symbols.remove(&call_id).expect("bad call id");
self.rebind_external_answer(&var, Term::from(answer));
Ok(())
}
/// Handle an external result provided by the application.
///
/// If the value is `Some(_)` then we have a result, and unify the
/// symbol associated with the call ID to the result value. If the
/// value is `None` then the external has no (more) results, so we
/// backtrack to the choice point left by `Goal::LookupExternal`.
fn external_call_result(&mut self, call_id: u64, term: Option<Term>) -> PolarResult<()> {
// TODO: Open question if we need to pass errors back down to rust.
// For example what happens if the call asked for a field that doesn't exist?
if let Some(value) = term {
self.log(LogLevel::Trace, || format!("=> {}", value), &[]);
// Fetch variable to unify with call result.
let sym = self.get_call_sym(call_id).to_owned();
self.push_goal(Goal::Unify {
left: Term::from(sym),
right: value,
})?;
} else {
self.log(LogLevel::Trace, || "=> No more results.", &[]);
// No more results. Clean up, cut out the retry alternative,
// and backtrack.
self.call_id_symbols.remove(&call_id).expect("bad call ID");
let check_error = if let Some(goal) = self.goals.last() {
matches!(*(*goal), Goal::CheckError)
} else {
false
};
self.push_goal(Goal::Backtrack)?;
self.push_goal(Goal::Cut {
choice_index: self.choices.len() - 1,
})?;
if check_error {
self.push_goal(Goal::CheckError)?;
}
}
Ok(())
}
/// Drive debugger.
fn debug_command(&mut self, command: &str) -> PolarResult<()> {
let mut debugger = self.debugger.clone();
let maybe_goal = debugger.debug_command(command, self);
if let Some(goal) = maybe_goal {
self.push_goal(goal)?;
}
self.debugger = debugger;
Ok(())
}
fn clone_runnable(&self) -> Box<dyn Runnable> {
Box::new(self.clone())
}
}
#[cfg(test)]
mod tests {
use permute::permute;
use super::*;
use crate::error::ErrorKind;
use crate::rewrites::unwrap_and;
impl PolarVirtualMachine {
/// Return true if there is nothing left to do.
fn is_halted(&self) -> bool {
self.goals.is_empty() && self.choices.is_empty()
}
}
/// Shorthand for constructing Goal::Query.
///
/// A one argument invocation assumes the 1st argument is the same
/// parameters that can be passed to the term! macro. In this invocation,
/// typically the form `query!(op!(And, term!(TERM)))` will be used. The
/// one argument form allows for queries with a top level operator other
/// than AND.
///
/// Multiple arguments `query!(f1, f2, f3)` result in a query with a root
/// AND operator term.
macro_rules! query {
($term:expr) => {
Goal::Query {
term: term!($term)
}
};
($($term:expr),+) => {
Goal::Query {
term: term!(op!(And, $($term),+))
}
};
}
/// Macro takes two arguments, the vm and a list-like structure of
/// QueryEvents to expect. It will call run() for each event in the second
/// argument and pattern match to check that the event matches what is
/// expected. Then `vm.is_halted()` is checked.
///
/// The QueryEvent list elements can either be:
/// - QueryEvent::Result{EXPR} where EXPR is a HashMap<Symbol, Term>.
/// This is shorthand for QueryEvent::Result{bindings} if bindings == EXPR.
/// Use btreemap! for EXPR from the maplit package to write inline hashmaps
/// to assert on.
/// - A pattern with optional guard accepted by matches!. (QueryEvent::Result
/// cannot be matched on due to the above rule.)
macro_rules! assert_query_events {
($vm:ident, []) => {
assert!($vm.is_halted());
};
($vm:ident, [QueryEvent::Result{$result:expr}]) => {
assert!(matches!($vm.run(None).unwrap(), QueryEvent::Result{bindings, ..} if bindings == $result));
assert_query_events!($vm, []);
};
($vm:ident, [QueryEvent::Result{$result:expr}, $($tail:tt)*]) => {
assert!(matches!($vm.run(None).unwrap(), QueryEvent::Result{bindings, ..} if bindings == $result));
assert_query_events!($vm, [$($tail)*]);
};
($vm:ident, [$( $pattern:pat_param )|+ $( if $guard: expr )?]) => {
assert!(matches!($vm.run(None).unwrap(), $($pattern)|+ $(if $guard)?));
assert_query_events!($vm, []);
};
($vm:ident, [$( $pattern:pat_param )|+ $( if $guard: expr )?, $($tail:tt)*]) => {
assert!(matches!($vm.run(None).unwrap(), $($pattern)|+ $(if $guard)?));
assert_query_events!($vm, [$($tail)*]);
};
// TODO (dhatch) Be able to use btreemap! to match on specific bindings.
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn and_expression() {
let f1 = rule!("f", [1]);
let f2 = rule!("f", [2]);
let rule = GenericRule::new(sym!("f"), vec![Arc::new(f1), Arc::new(f2)]);
let mut kb = KnowledgeBase::new();
kb.add_generic_rule(rule);
let goal = query!(op!(And));
let mut vm = PolarVirtualMachine::new_test(Arc::new(RwLock::new(kb)), false, vec![goal]);
assert_query_events!(vm, [
QueryEvent::Result{hashmap!()},
QueryEvent::Done { result: true }
]);
assert!(vm.is_halted());
let f1 = term!(call!("f", [1]));
let f2 = term!(call!("f", [2]));
let f3 = term!(call!("f", [3]));
// Querying for f(1)
vm.push_goal(query!(op!(And, f1.clone()))).unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{}},
QueryEvent::Done { result: true }
]);
// Querying for f(1), f(2)
vm.push_goal(query!(f1.clone(), f2.clone())).unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{}},
QueryEvent::Done { result: true }
]);
// Querying for f(3)
vm.push_goal(query!(op!(And, f3.clone()))).unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
// Querying for f(1), f(2), f(3)
let parts = vec![f1, f2, f3];
for permutation in permute(parts) {
vm.push_goal(Goal::Query {
term: Term::new_from_test(Value::Expression(Operation {
operator: Operator::And,
args: permutation,
})),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
}
}
#[test]
fn unify_expression() {
let mut vm = PolarVirtualMachine::default();
vm.push_goal(query!(op!(Unify, term!(1), term!(1))))
.unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{}},
QueryEvent::Done { result: true }
]);
let q = op!(Unify, term!(1), term!(2));
vm.push_goal(query!(q)).unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn isa_on_lists() {
let mut vm = PolarVirtualMachine::default();
let one = term!(1);
let one_list = term!([1]);
let one_two_list = term!([1, 2]);
let two_one_list = term!([2, 1]);
let empty_list = term!([]);
// [] isa []
vm.push_goal(Goal::Isa {
left: empty_list.clone(),
right: empty_list.clone(),
})
.unwrap();
assert!(
matches!(vm.run(None).unwrap(), QueryEvent::Result{bindings, ..} if bindings.is_empty())
);
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1,2] isa [1,2]
vm.push_goal(Goal::Isa {
left: one_two_list.clone(),
right: one_two_list.clone(),
})
.unwrap();
assert!(
matches!(vm.run(None).unwrap(), QueryEvent::Result{bindings, ..} if bindings.is_empty())
);
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1,2] isNOTa [2,1]
vm.push_goal(Goal::Isa {
left: one_two_list.clone(),
right: two_one_list,
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1] isNOTa [1,2]
vm.push_goal(Goal::Isa {
left: one_list.clone(),
right: one_two_list.clone(),
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1,2] isNOTa [1]
vm.push_goal(Goal::Isa {
left: one_two_list.clone(),
right: one_list.clone(),
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1] isNOTa []
vm.push_goal(Goal::Isa {
left: one_list.clone(),
right: empty_list.clone(),
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [] isNOTa [1]
vm.push_goal(Goal::Isa {
left: empty_list,
right: one_list.clone(),
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1] isNOTa 1
vm.push_goal(Goal::Isa {
left: one_list.clone(),
right: one.clone(),
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// 1 isNOTa [1]
vm.push_goal(Goal::Isa {
left: one,
right: one_list,
})
.unwrap();
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Done { result: true }
));
assert!(vm.is_halted());
// [1,2] isa [1, *rest]
vm.push_goal(Goal::Isa {
left: one_two_list,
right: term!([1, Value::RestVariable(sym!("rest"))]),
})
.unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{sym!("rest") => term!([2])}},
QueryEvent::Done { result: true }
]);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn isa_on_dicts() {
let mut vm = PolarVirtualMachine::default();
let dict = term!(btreemap! {
sym!("x") => term!(1),
sym!("y") => term!(2),
});
let dict_pattern = term!(pattern!(btreemap! {
sym!("x") => term!(1),
sym!("y") => term!(2),
}));
vm.push_goal(Goal::Isa {
left: dict.clone(),
right: dict_pattern.clone(),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Dicts with identical keys and different values DO NOT isa.
let different_dict_pattern = term!(pattern!(btreemap! {
sym!("x") => term!(2),
sym!("y") => term!(1),
}));
vm.push_goal(Goal::Isa {
left: dict.clone(),
right: different_dict_pattern,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
let empty_dict = term!(btreemap! {});
let empty_dict_pattern = term!(pattern!(btreemap! {}));
// {} isa {}.
vm.push_goal(Goal::Isa {
left: empty_dict.clone(),
right: empty_dict_pattern.clone(),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Non-empty dicts should isa against an empty dict.
vm.push_goal(Goal::Isa {
left: dict.clone(),
right: empty_dict_pattern,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Empty dicts should NOT isa against a non-empty dict.
vm.push_goal(Goal::Isa {
left: empty_dict,
right: dict_pattern.clone(),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
let subset_dict_pattern = term!(pattern!(btreemap! {sym!("x") => term!(1)}));
// Superset dict isa subset dict.
vm.push_goal(Goal::Isa {
left: dict,
right: subset_dict_pattern,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Subset dict isNOTa superset dict.
let subset_dict = term!(btreemap! {sym!("x") => term!(1)});
vm.push_goal(Goal::Isa {
left: subset_dict,
right: dict_pattern,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
}
#[test]
fn unify_dicts() {
let mut vm = PolarVirtualMachine::default();
// Dicts with identical keys and values unify.
let left = term!(btreemap! {
sym!("x") => term!(1),
sym!("y") => term!(2),
});
let right = term!(btreemap! {
sym!("x") => term!(1),
sym!("y") => term!(2),
});
vm.push_goal(Goal::Unify {
left: left.clone(),
right,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Dicts with identical keys and different values DO NOT unify.
let right = term!(btreemap! {
sym!("x") => term!(2),
sym!("y") => term!(1),
});
vm.push_goal(Goal::Unify {
left: left.clone(),
right,
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
// Empty dicts unify.
vm.push_goal(Goal::Unify {
left: term!(btreemap! {}),
right: term!(btreemap! {}),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!() }, QueryEvent::Done { result: true }]);
// Empty dict should not unify against a non-empty dict.
vm.push_goal(Goal::Unify {
left: left.clone(),
right: term!(btreemap! {}),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
// Subset match should fail.
let right = term!(btreemap! {
sym!("x") => term!(1),
});
vm.push_goal(Goal::Unify { left, right }).unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
}
#[test]
fn unify_nested_dicts() {
let mut vm = PolarVirtualMachine::default();
let left = term!(btreemap! {
sym!("x") => term!(btreemap!{
sym!("y") => term!(1)
})
});
let right = term!(btreemap! {
sym!("x") => term!(btreemap!{
sym!("y") => term!(sym!("result"))
})
});
vm.push_goal(Goal::Unify { left, right }).unwrap();
assert_query_events!(vm, [QueryEvent::Result { hashmap!{sym!("result") => term!(1)} }, QueryEvent::Done { result: true }]);
}
#[test]
fn lookup() {
let mut vm = PolarVirtualMachine::default();
let fields = btreemap! {
sym!("x") => term!(1),
};
let dict = Dictionary { fields };
vm.push_goal(Goal::Lookup {
dict: dict.clone(),
field: term!(string!("x")),
value: term!(1),
})
.unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{}}
]);
// Lookup with incorrect value
vm.push_goal(Goal::Lookup {
dict: dict.clone(),
field: term!(string!("x")),
value: term!(2),
})
.unwrap();
assert_query_events!(vm, [QueryEvent::Done { result: true }]);
// Lookup with unbound value
vm.push_goal(Goal::Lookup {
dict,
field: term!(string!("x")),
value: term!(sym!("y")),
})
.unwrap();
assert_query_events!(vm, [
QueryEvent::Result{hashmap!{sym!("y") => term!(1)}}
]);
}
#[test]
fn debug() {
let mut vm = PolarVirtualMachine::new_test(
Arc::new(RwLock::new(KnowledgeBase::new())),
false,
vec![Goal::Debug {
message: "Hello".to_string(),
}],
);
assert!(matches!(
vm.run(None).unwrap(),
QueryEvent::Debug { message } if &message[..] == "Hello"
));
}
#[test]
fn halt() {
let mut vm = PolarVirtualMachine::new_test(
Arc::new(RwLock::new(KnowledgeBase::new())),
false,
vec![Goal::Halt],
);
let _ = vm.run(None).unwrap();
assert_eq!(vm.goals.len(), 0);
assert_eq!(vm.bindings(true).len(), 0);
}
#[test]
fn unify() {
let x = sym!("x");
let y = sym!("y");
let vars = term!([x.clone(), y.clone()]);
let zero = value!(0);
let one = value!(1);
let vals = term!([zero.clone(), one.clone()]);
let mut vm = PolarVirtualMachine::new_test(
Arc::new(RwLock::new(KnowledgeBase::new())),
false,
vec![Goal::Unify {
left: vars,
right: vals,
}],
);
let _ = vm.run(None).unwrap();
assert_eq!(vm.variable_state(&x), VariableState::Bound(term!(zero)));
assert_eq!(vm.variable_state(&y), VariableState::Bound(term!(one)));
}
#[test]
fn unify_var() {
let x = sym!("x");
let y = sym!("y");
let z = sym!("z");
let one = term!(1);
let two = term!(2);
let mut vm = PolarVirtualMachine::default();
// Left variable bound to bound right variable.
vm.bind(&y, one.clone()).unwrap();
vm.append_goals(vec![Goal::Unify {
left: term!(x.clone()),
right: term!(y),
}])
.unwrap();
let _ = vm.run(None).unwrap();
assert_eq!(vm.deref(&term!(x)), one);
vm.backtrack().unwrap();
// Left variable bound to value.
vm.bind(&z, one.clone()).unwrap();
vm.append_goals(vec![Goal::Unify {
left: term!(z.clone()),
right: one.clone(),
}])
.unwrap();
let _ = vm.run(None).unwrap();
assert_eq!(vm.deref(&term!(z.clone())), one);
// Left variable bound to value, unify with something else, backtrack.
vm.append_goals(vec![Goal::Unify {
left: term!(z.clone()),
right: two,
}])
.unwrap();
let _ = vm.run(None).unwrap();
assert_eq!(vm.deref(&term!(z)), one);
}
#[test]
fn test_gen_var() {
let vm = PolarVirtualMachine::default();
let rule = Rule::new_from_test(
Symbol::new("foo"),
vec![],
Term::new_from_test(Value::Expression(Operation {
operator: Operator::And,
args: vec![
term!(1),
Term::new_from_test(Value::Variable(Symbol("x".to_string()))),
Term::new_from_test(Value::Variable(Symbol("x".to_string()))),
Term::new_from_test(Value::List(vec![Term::new_from_test(Value::Variable(
Symbol("y".to_string()),
))])),
],
})),
);
let renamed_rule = vm.rename_rule_vars(&rule);
let renamed_terms = unwrap_and(&renamed_rule.body);
assert_eq!(renamed_terms[1].value(), renamed_terms[2].value());
let x_value = match &renamed_terms[1].value() {
Value::Variable(sym) => Some(sym.0.clone()),
_ => None,
};
assert_eq!(x_value.unwrap(), "_x_1");
let y_value = match &renamed_terms[3].value() {
Value::List(terms) => match &terms[0].value() {
Value::Variable(sym) => Some(sym.0.clone()),
_ => None,
},
_ => None,
};
assert_eq!(y_value.unwrap(), "_y_2");
}
#[test]
fn test_filter_rules() {
let rule_a = Arc::new(rule!("bar", ["_"; instance!("a")]));
let rule_b = Arc::new(rule!("bar", ["_"; instance!("b")]));
let rule_1a = Arc::new(rule!("bar", [value!(1)]));
let rule_1b = Arc::new(rule!("bar", ["_"; value!(1)]));
let gen_rule = GenericRule::new(sym!("bar"), vec![rule_a, rule_b, rule_1a, rule_1b]);
let mut kb = KnowledgeBase::new();
kb.add_generic_rule(gen_rule);
let kb = Arc::new(RwLock::new(kb));
let external_instance = Value::ExternalInstance(ExternalInstance {
instance_id: 1,
constructor: None,
repr: None,
class_repr: None,
class_id: None,
});
let query = query!(call!("bar", [sym!("x")]));
let mut vm = PolarVirtualMachine::new_test(kb.clone(), false, vec![query]);
vm.bind(&sym!("x"), Term::new_from_test(external_instance))
.unwrap();
let mut external_isas = vec![];
loop {
match vm.run(None).unwrap() {
QueryEvent::Done { .. } => break,
QueryEvent::ExternalIsa {
call_id, class_tag, ..
} => {
external_isas.push(class_tag.clone());
// Return `true` if the specified `class_tag` is `"a"`.
vm.external_question_result(call_id, class_tag.0 == "a")
.unwrap()
}
QueryEvent::ExternalOp { .. }
| QueryEvent::ExternalIsSubSpecializer { .. }
| QueryEvent::Result { .. } => (),
e => panic!("Unexpected event: {:?}", e),
}
}
let expected = vec![sym!("b"), sym!("a"), sym!("a")];
assert_eq!(external_isas, expected);
let query = query!(call!("bar", [sym!("x")]));
let mut vm = PolarVirtualMachine::new_test(kb, false, vec![query]);
vm.bind(&sym!("x"), Term::new_from_test(value!(1))).unwrap();
let mut results = vec![];
loop {
match vm.run(None).unwrap() {
QueryEvent::Done { .. } => break,
QueryEvent::ExternalIsa { .. } => (),
QueryEvent::Result { bindings, .. } => results.push(bindings),
_ => panic!("Unexpected event"),
}
}
assert_eq!(results.len(), 2);
assert_eq!(
results,
vec![
hashmap! {sym!("x") => term!(1)},
hashmap! {sym!("x") => term!(1)},
]
);
}
#[test]
fn test_sort_rules() {
// Test sort rule by mocking ExternalIsSubSpecializer and ExternalIsa.
let bar_rule = GenericRule::new(
sym!("bar"),
vec![
Arc::new(rule!("bar", ["_"; instance!("b"), "_"; instance!("a"), value!(3)])),
Arc::new(rule!("bar", ["_"; instance!("a"), "_"; instance!("a"), value!(1)])),
Arc::new(rule!("bar", ["_"; instance!("a"), "_"; instance!("b"), value!(2)])),
Arc::new(rule!("bar", ["_"; instance!("b"), "_"; instance!("b"), value!(4)])),
],
);
let mut kb = KnowledgeBase::new();
kb.add_generic_rule(bar_rule);
let external_instance = Value::ExternalInstance(ExternalInstance {
instance_id: 1,
constructor: None,
repr: None,
class_repr: None,
class_id: None,
});
let mut vm = PolarVirtualMachine::new_test(
Arc::new(RwLock::new(kb)),
false,
vec![query!(call!(
"bar",
[external_instance.clone(), external_instance, sym!("z")]
))],
);
let mut results = Vec::new();
loop {
match vm.run(None).unwrap() {
QueryEvent::Done { .. } => break,
QueryEvent::Result { bindings, .. } => results.push(bindings),
QueryEvent::ExternalIsSubSpecializer {
call_id,
left_class_tag,
right_class_tag,
..
} => {
// For this test we sort classes lexically.
vm.external_question_result(call_id, left_class_tag < right_class_tag)
.unwrap()
}
QueryEvent::MakeExternal { .. } => (),
QueryEvent::ExternalOp {
operator: Operator::Eq,
call_id,
..
} => vm.external_question_result(call_id, true).unwrap(),
QueryEvent::ExternalIsa { call_id, .. } => {
// For this test, anything is anything.
vm.external_question_result(call_id, true).unwrap()
}
_ => panic!("Unexpected event"),
}
}
assert_eq!(results.len(), 4);
assert_eq!(
results,
vec![
hashmap! {sym!("z") => term!(1)},
hashmap! {sym!("z") => term!(2)},
hashmap! {sym!("z") => term!(3)},
hashmap! {sym!("z") => term!(4)},
]
);
}
#[test]
fn test_is_subspecializer() {
let mut vm = PolarVirtualMachine::default();
// Test `is_subspecializer` case where:
// - arg: `ExternalInstance`
// - left: `InstanceLiteral`
// - right: `Dictionary`
let arg = term!(Value::ExternalInstance(ExternalInstance {
instance_id: 1,
constructor: None,
repr: None,
class_repr: None,
class_id: None,
}));
let left = term!(value!(Pattern::Instance(InstanceLiteral {
tag: sym!("Any"),
fields: Dictionary {
fields: btreemap! {}
}
})));
let right = term!(Value::Pattern(Pattern::Dictionary(Dictionary {
fields: btreemap! {sym!("a") => term!("a")},
})));
let answer = vm.kb.read().unwrap().gensym("is_subspecializer");
match vm.is_subspecializer(&answer, &left, &right, &arg).unwrap() {
QueryEvent::None => (),
event => panic!("Expected None, got {:?}", event),
}
assert_eq!(
vm.deref(&term!(Value::Variable(answer))),
term!(value!(true))
);
}
#[test]
fn test_timeout() {
let vm = PolarVirtualMachine::default();
assert!(vm.query_timeout_ms == DEFAULT_TIMEOUT_MS);
std::env::set_var("POLAR_TIMEOUT_MS", "0");
let vm = PolarVirtualMachine::default();
std::env::remove_var("POLAR_TIMEOUT_MS");
assert!(vm.is_query_timeout_disabled());
std::env::set_var("POLAR_TIMEOUT_MS", "500");
let mut vm = PolarVirtualMachine::default();
std::env::remove_var("POLAR_TIMEOUT_MS");
// Turn this off so we don't hit it.
vm.set_stack_limit(std::usize::MAX);
loop {
vm.push_goal(Goal::Noop).unwrap();
vm.push_goal(Goal::MakeExternal {
constructor: Term::from(true),
instance_id: 1,
})
.unwrap();
let result = vm.run(None);
match result {
Ok(event) => assert!(matches!(event, QueryEvent::MakeExternal { .. })),
Err(err) => {
assert!(matches!(
err.0,
ErrorKind::Runtime(RuntimeError::QueryTimeout { .. })
));
// End test.
break;
}
}
}
}
#[test]
fn test_prefiltering() {
let bar_rule = GenericRule::new(
sym!("bar"),
vec![
Arc::new(rule!("bar", [value!([1])])),
Arc::new(rule!("bar", [value!([2])])),
],
);
let mut kb = KnowledgeBase::new();
kb.add_generic_rule(bar_rule);
let mut vm = PolarVirtualMachine::new_test(Arc::new(RwLock::new(kb)), false, vec![]);
vm.bind(&sym!("x"), term!(1)).unwrap();
let _ = vm.run(None);
let _ = vm.next(Rc::new(query!(call!("bar", [value!([sym!("x")])]))));
// After calling the query goal we should be left with the
// prefiltered rules
let next_goal = vm
.goals
.iter()
.find(|g| matches!(g.as_ref(), Goal::FilterRules { .. }))
.unwrap();
let goal_debug = format!("{:#?}", next_goal);
assert!(
matches!(next_goal.as_ref(), Goal::FilterRules {
ref applicable_rules, ref unfiltered_rules, ..
} if unfiltered_rules.len() == 1 && applicable_rules.is_empty()),
"Goal should contain just one prefiltered rule: {}",
goal_debug
);
}
#[test]
fn choose_conditional() {
let mut vm = PolarVirtualMachine::new_test(
Arc::new(RwLock::new(KnowledgeBase::new())),
false,
vec![],
);
let consequent = Goal::Debug {
message: "consequent".to_string(),
};
let alternative = Goal::Debug {
message: "alternative".to_string(),
};
// Check consequent path when conditional succeeds.
vm.choose_conditional(
vec![Goal::Noop],
vec![consequent.clone()],
vec![alternative.clone()],
)
.unwrap();
assert_query_events!(vm, [
QueryEvent::Debug { message } if &message[..] == "consequent" && vm.is_halted(),
QueryEvent::Done { result: true }
]);
// Check alternative path when conditional fails.
vm.choose_conditional(
vec![Goal::Backtrack],
vec![consequent.clone()],
vec![alternative.clone()],
)
.unwrap();
assert_query_events!(vm, [
QueryEvent::Debug { message } if &message[..] == "alternative" && vm.is_halted(),
QueryEvent::Done { result: true }
]);
// Ensure bindings are cleaned up after conditional.
vm.choose_conditional(
vec![
Goal::Unify {
left: term!(sym!("x")),
right: term!(true),
},
query!(sym!("x")),
],
vec![consequent],
vec![alternative],
)
.unwrap();
assert_query_events!(vm, [
QueryEvent::Debug { message } if &message[..] == "consequent" && vm.bindings(true).is_empty() && vm.is_halted(),
QueryEvent::Done { result: true }
]);
}
#[test]
fn test_log_level_should_print_for_level() {
use LogLevel::*;
// TRACE
assert!(Trace.should_print_on_level(Trace));
assert!(Trace.should_print_on_level(Debug));
assert!(Trace.should_print_on_level(Info));
// DEBUG
assert!(!Debug.should_print_on_level(Trace));
assert!(Debug.should_print_on_level(Debug));
assert!(Debug.should_print_on_level(Info));
// INFO
assert!(!Info.should_print_on_level(Trace));
assert!(!Info.should_print_on_level(Debug));
assert!(Info.should_print_on_level(Info));
}
}
| 36.761108 | 156 | 0.477907 |
dd3d2236012be9e215018aa10045d5a7b65f5a3a | 3,018 | use poem_openapi::{
payload::{Json, PlainText},
registry::{MetaMediaType, MetaRequest, MetaSchema, MetaSchemaRef},
types::ParseFromJSON,
ApiExtractor, ApiRequest, Object,
};
#[derive(Debug, Object, Eq, PartialEq)]
struct CreateUser {
user: String,
password: String,
}
/// MyRequest
///
/// ABC
#[derive(Debug, ApiRequest, Eq, PartialEq)]
enum MyRequest {
CreateByJson(Json<CreateUser>),
CreateByPlainText(PlainText<String>),
}
#[test]
fn meta() {
assert_eq!(
MyRequest::request_meta().unwrap(),
MetaRequest {
description: Some("MyRequest\n\nABC"),
content: vec![
MetaMediaType {
content_type: "application/json",
schema: MetaSchemaRef::Reference("CreateUser"),
},
MetaMediaType {
content_type: "text/plain",
schema: MetaSchemaRef::Inline(Box::new(MetaSchema::new("string"))),
}
],
required: true
}
);
}
#[tokio::test]
async fn from_request() {
let request = poem::Request::builder()
.content_type("application/json")
.body(
serde_json::to_vec(&serde_json::json!({
"user": "sunli",
"password": "123456",
}))
.unwrap(),
);
let (request, mut body) = request.split();
assert_eq!(
MyRequest::from_request(&request, &mut body, Default::default())
.await
.unwrap(),
MyRequest::CreateByJson(Json(CreateUser {
user: "sunli".to_string(),
password: "123456".to_string()
}))
);
let request = poem::Request::builder()
.content_type("text/plain")
.body("abcdef".to_string());
let (request, mut body) = request.split();
assert_eq!(
MyRequest::from_request(&request, &mut body, Default::default())
.await
.unwrap(),
MyRequest::CreateByPlainText(PlainText("abcdef".to_string()))
);
}
#[tokio::test]
async fn generic() {
#[derive(Debug, ApiRequest, Eq, PartialEq)]
enum MyRequest<T: ParseFromJSON> {
CreateByJson(Json<T>),
}
let request = poem::Request::builder()
.content_type("application/json")
.body(serde_json::to_vec(&serde_json::json!("hello")).unwrap());
assert_eq!(
MyRequest::<String>::request_meta().unwrap(),
MetaRequest {
description: None,
content: vec![MetaMediaType {
content_type: "application/json",
schema: MetaSchemaRef::Inline(Box::new(MetaSchema::new("string"))),
},],
required: true
}
);
let (request, mut body) = request.split();
assert_eq!(
MyRequest::<String>::from_request(&request, &mut body, Default::default())
.await
.unwrap(),
MyRequest::CreateByJson(Json("hello".to_string()))
);
}
| 27.688073 | 87 | 0.545726 |
ff54414fcc03bd28aca3ba316d9a55af2621819d | 2,333 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::Accessible;
use crate::Buildable;
use crate::ConstraintTarget;
use crate::Widget;
use glib::object::IsA;
use glib::translate::*;
use std::fmt;
use std::mem;
glib::wrapper! {
pub struct Native(Interface<ffi::GtkNative, ffi::GtkNativeInterface>) @requires Widget, Accessible, Buildable, ConstraintTarget;
match fn {
type_ => || ffi::gtk_native_get_type(),
}
}
impl Native {
#[doc(alias = "gtk_native_get_for_surface")]
#[doc(alias = "get_for_surface")]
pub fn for_surface<P: IsA<gdk::Surface>>(surface: &P) -> Option<Native> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gtk_native_get_for_surface(
surface.as_ref().to_glib_none().0,
))
}
}
}
pub const NONE_NATIVE: Option<&Native> = None;
pub trait NativeExt: 'static {
#[doc(alias = "gtk_native_get_renderer")]
#[doc(alias = "get_renderer")]
fn renderer(&self) -> Option<gsk::Renderer>;
#[doc(alias = "gtk_native_get_surface")]
#[doc(alias = "get_surface")]
fn surface(&self) -> Option<gdk::Surface>;
#[doc(alias = "gtk_native_get_surface_transform")]
#[doc(alias = "get_surface_transform")]
fn surface_transform(&self) -> (f64, f64);
}
impl<O: IsA<Native>> NativeExt for O {
fn renderer(&self) -> Option<gsk::Renderer> {
unsafe { from_glib_none(ffi::gtk_native_get_renderer(self.as_ref().to_glib_none().0)) }
}
fn surface(&self) -> Option<gdk::Surface> {
unsafe { from_glib_none(ffi::gtk_native_get_surface(self.as_ref().to_glib_none().0)) }
}
fn surface_transform(&self) -> (f64, f64) {
unsafe {
let mut x = mem::MaybeUninit::uninit();
let mut y = mem::MaybeUninit::uninit();
ffi::gtk_native_get_surface_transform(
self.as_ref().to_glib_none().0,
x.as_mut_ptr(),
y.as_mut_ptr(),
);
let x = x.assume_init();
let y = y.assume_init();
(x, y)
}
}
}
impl fmt::Display for Native {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("Native")
}
}
| 28.802469 | 132 | 0.604372 |
1e75002b04bc81760ace23e366245e4ddce8efbf | 29,273 | #[cfg(not(feature = "library"))]
use cosmwasm_std::entry_point;
use cosmwasm_std::{
attr, to_binary, Addr, BankMsg, Binary, Coin, CosmosMsg, Deps, DepsMut, Env, MessageInfo,
Response, StdResult, Uint128, WasmMsg,
};
use std::cmp::{max, min};
use crate::collateral::{
liquidate_collateral, lock_collateral, query_all_collaterals, query_borrow_limit,
query_collaterals, unlock_collateral,
};
use crate::error::ContractError;
use crate::querier::query_epoch_state;
use crate::state::{
read_config, read_dynrate_config, read_dynrate_state, read_epoch_state, read_whitelist,
read_whitelist_elem, store_config, store_dynrate_config, store_dynrate_state,
store_epoch_state, store_whitelist_elem, Config, DynrateConfig, DynrateState, EpochState,
WhitelistElem,
};
use cosmwasm_bignumber::{Decimal256, Uint256};
use moneymarket::common::optional_addr_validate;
use moneymarket::custody::ExecuteMsg as CustodyExecuteMsg;
use moneymarket::market::EpochStateResponse;
use moneymarket::market::ExecuteMsg as MarketExecuteMsg;
use moneymarket::overseer::{
ConfigResponse, ExecuteMsg, InstantiateMsg, MigrateMsg, QueryMsg, WhitelistResponse,
WhitelistResponseElem,
};
use moneymarket::querier::{deduct_tax, query_balance};
pub const BLOCKS_PER_YEAR: u128 = 4656810;
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn instantiate(
deps: DepsMut,
env: Env,
_info: MessageInfo,
msg: InstantiateMsg,
) -> StdResult<Response> {
store_config(
deps.storage,
&Config {
owner_addr: deps.api.addr_canonicalize(&msg.owner_addr)?,
oracle_contract: deps.api.addr_canonicalize(&msg.oracle_contract)?,
market_contract: deps.api.addr_canonicalize(&msg.market_contract)?,
liquidation_contract: deps.api.addr_canonicalize(&msg.liquidation_contract)?,
collector_contract: deps.api.addr_canonicalize(&msg.collector_contract)?,
stable_denom: msg.stable_denom,
epoch_period: msg.epoch_period,
threshold_deposit_rate: msg.threshold_deposit_rate,
target_deposit_rate: msg.target_deposit_rate,
buffer_distribution_factor: msg.buffer_distribution_factor,
anc_purchase_factor: msg.anc_purchase_factor,
price_timeframe: msg.price_timeframe,
},
)?;
store_dynrate_config(
deps.storage,
&DynrateConfig {
dyn_rate_epoch: msg.dyn_rate_epoch,
dyn_rate_maxchange: msg.dyn_rate_maxchange,
dyn_rate_yr_increase_expectation: msg.dyn_rate_yr_increase_expectation,
dyn_rate_min: msg.dyn_rate_min,
dyn_rate_max: msg.dyn_rate_max,
},
)?;
store_epoch_state(
deps.storage,
&EpochState {
deposit_rate: Decimal256::zero(),
prev_aterra_supply: Uint256::zero(),
prev_interest_buffer: Uint256::zero(),
prev_exchange_rate: Decimal256::one(),
last_executed_height: env.block.height,
},
)?;
store_dynrate_state(
deps.storage,
&DynrateState {
last_executed_height: env.block.height,
prev_yield_reserve: Decimal256::zero(),
},
)?;
Ok(Response::default())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn migrate(deps: DepsMut, env: Env, msg: MigrateMsg) -> StdResult<Response> {
store_dynrate_config(
deps.storage,
&DynrateConfig {
dyn_rate_epoch: msg.dyn_rate_epoch,
dyn_rate_maxchange: msg.dyn_rate_maxchange,
dyn_rate_yr_increase_expectation: msg.dyn_rate_yr_increase_expectation,
dyn_rate_min: msg.dyn_rate_min,
dyn_rate_max: msg.dyn_rate_max,
},
)?;
let mut config = read_config(deps.storage)?;
let prev_yield_reserve = query_balance(
deps.as_ref(),
env.contract.address.clone(),
config.stable_denom.clone(),
)?;
store_dynrate_state(
deps.storage,
&DynrateState {
last_executed_height: env.block.height,
prev_yield_reserve: Decimal256::from_ratio(prev_yield_reserve, 1),
},
)?;
let new_rate = max(
min(config.threshold_deposit_rate, msg.dyn_rate_max),
msg.dyn_rate_min,
);
config.threshold_deposit_rate = new_rate;
config.target_deposit_rate = new_rate;
store_config(deps.storage, &config)?;
Ok(Response::default())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn execute(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: ExecuteMsg,
) -> Result<Response, ContractError> {
match msg {
ExecuteMsg::UpdateConfig {
owner_addr,
oracle_contract,
liquidation_contract,
threshold_deposit_rate,
target_deposit_rate,
buffer_distribution_factor,
anc_purchase_factor,
epoch_period,
price_timeframe,
dyn_rate_epoch,
dyn_rate_maxchange,
dyn_rate_yr_increase_expectation,
dyn_rate_min,
dyn_rate_max,
} => {
let api = deps.api;
update_config(
deps,
info,
optional_addr_validate(api, owner_addr)?,
optional_addr_validate(api, oracle_contract)?,
optional_addr_validate(api, liquidation_contract)?,
threshold_deposit_rate,
target_deposit_rate,
buffer_distribution_factor,
anc_purchase_factor,
epoch_period,
price_timeframe,
dyn_rate_epoch,
dyn_rate_maxchange,
dyn_rate_yr_increase_expectation,
dyn_rate_min,
dyn_rate_max,
)
}
ExecuteMsg::Whitelist {
name,
symbol,
collateral_token,
custody_contract,
max_ltv,
} => {
let api = deps.api;
register_whitelist(
deps,
info,
name,
symbol,
api.addr_validate(&collateral_token)?,
api.addr_validate(&custody_contract)?,
max_ltv,
)
}
ExecuteMsg::UpdateWhitelist {
collateral_token,
custody_contract,
max_ltv,
} => {
let api = deps.api;
update_whitelist(
deps,
info,
api.addr_validate(&collateral_token)?,
optional_addr_validate(api, custody_contract)?,
max_ltv,
)
}
ExecuteMsg::ExecuteEpochOperations {} => execute_epoch_operations(deps, env),
ExecuteMsg::UpdateEpochState {
interest_buffer,
distributed_interest,
} => update_epoch_state(deps, env, info, interest_buffer, distributed_interest),
ExecuteMsg::LockCollateral { collaterals } => lock_collateral(deps, info, collaterals),
ExecuteMsg::UnlockCollateral { collaterals } => {
unlock_collateral(deps, env, info, collaterals)
}
ExecuteMsg::LiquidateCollateral { borrower } => {
let api = deps.api;
liquidate_collateral(deps, env, info, api.addr_validate(&borrower)?)
}
ExecuteMsg::FundReserve {} => fund_reserve(deps, info),
}
}
#[allow(clippy::too_many_arguments)]
pub fn update_config(
deps: DepsMut,
info: MessageInfo,
owner_addr: Option<Addr>,
oracle_contract: Option<Addr>,
liquidation_contract: Option<Addr>,
threshold_deposit_rate: Option<Decimal256>,
target_deposit_rate: Option<Decimal256>,
buffer_distribution_factor: Option<Decimal256>,
anc_purchase_factor: Option<Decimal256>,
epoch_period: Option<u64>,
price_timeframe: Option<u64>,
dyn_rate_epoch: Option<u64>,
dyn_rate_maxchange: Option<Decimal256>,
dyn_rate_yr_increase_expectation: Option<Decimal256>,
dyn_rate_min: Option<Decimal256>,
dyn_rate_max: Option<Decimal256>,
) -> Result<Response, ContractError> {
let mut config: Config = read_config(deps.storage)?;
let mut dynrate_config: DynrateConfig = read_dynrate_config(deps.storage)?;
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner_addr {
return Err(ContractError::Unauthorized {});
}
if let Some(owner_addr) = owner_addr {
config.owner_addr = deps.api.addr_canonicalize(&owner_addr.to_string())?;
}
if let Some(oracle_contract) = oracle_contract {
config.oracle_contract = deps.api.addr_canonicalize(&oracle_contract.to_string())?;
}
if let Some(liquidation_contract) = liquidation_contract {
config.liquidation_contract = deps
.api
.addr_canonicalize(&liquidation_contract.to_string())?;
}
if let Some(threshold_deposit_rate) = threshold_deposit_rate {
config.threshold_deposit_rate = threshold_deposit_rate;
}
if let Some(buffer_distribution_factor) = buffer_distribution_factor {
config.buffer_distribution_factor = buffer_distribution_factor;
}
if let Some(anc_purchase_factor) = anc_purchase_factor {
config.anc_purchase_factor = anc_purchase_factor;
}
if let Some(target_deposit_rate) = target_deposit_rate {
config.target_deposit_rate = target_deposit_rate;
}
if let Some(epoch_period) = epoch_period {
config.epoch_period = epoch_period;
}
if let Some(price_timeframe) = price_timeframe {
config.price_timeframe = price_timeframe;
}
if let Some(dyn_rate_epoch) = dyn_rate_epoch {
dynrate_config.dyn_rate_epoch = dyn_rate_epoch;
}
if let Some(dyn_rate_maxchange) = dyn_rate_maxchange {
dynrate_config.dyn_rate_maxchange = dyn_rate_maxchange;
}
if let Some(dyn_rate_yr_increase_expectation) = dyn_rate_yr_increase_expectation {
dynrate_config.dyn_rate_yr_increase_expectation = dyn_rate_yr_increase_expectation;
}
if let Some(dyn_rate_min) = dyn_rate_min {
dynrate_config.dyn_rate_min = dyn_rate_min;
}
if let Some(dyn_rate_max) = dyn_rate_max {
dynrate_config.dyn_rate_max = dyn_rate_max;
}
store_config(deps.storage, &config)?;
store_dynrate_config(deps.storage, &dynrate_config)?;
Ok(Response::new().add_attributes(vec![attr("action", "update_config")]))
}
pub fn register_whitelist(
deps: DepsMut,
info: MessageInfo,
name: String,
symbol: String,
collateral_token: Addr,
custody_contract: Addr,
max_ltv: Decimal256,
) -> Result<Response, ContractError> {
let config: Config = read_config(deps.storage)?;
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner_addr {
return Err(ContractError::Unauthorized {});
}
let collateral_token_raw = deps.api.addr_canonicalize(collateral_token.as_str())?;
if read_whitelist_elem(deps.storage, &collateral_token_raw).is_ok() {
return Err(ContractError::TokenAlreadyRegistered {});
}
store_whitelist_elem(
deps.storage,
&collateral_token_raw,
&WhitelistElem {
name: name.to_string(),
symbol: symbol.to_string(),
custody_contract: deps.api.addr_canonicalize(custody_contract.as_str())?,
max_ltv,
},
)?;
Ok(Response::new().add_attributes(vec![
attr("action", "register_whitelist"),
attr("name", name),
attr("symbol", symbol),
attr("collateral_token", collateral_token),
attr("custody_contract", custody_contract),
attr("LTV", max_ltv.to_string()),
]))
}
pub fn update_whitelist(
deps: DepsMut,
info: MessageInfo,
collateral_token: Addr,
custody_contract: Option<Addr>,
max_ltv: Option<Decimal256>,
) -> Result<Response, ContractError> {
let config: Config = read_config(deps.storage)?;
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner_addr {
return Err(ContractError::Unauthorized {});
}
let collateral_token_raw = deps.api.addr_canonicalize(collateral_token.as_str())?;
let mut whitelist_elem: WhitelistElem =
read_whitelist_elem(deps.storage, &collateral_token_raw)?;
if let Some(custody_contract) = custody_contract {
whitelist_elem.custody_contract = deps.api.addr_canonicalize(custody_contract.as_str())?;
}
if let Some(max_ltv) = max_ltv {
whitelist_elem.max_ltv = max_ltv;
}
store_whitelist_elem(deps.storage, &collateral_token_raw, &whitelist_elem)?;
Ok(Response::new().add_attributes(vec![
attr("action", "update_whitelist"),
attr("collateral_token", collateral_token),
attr(
"custody_contract",
deps.api.addr_humanize(&whitelist_elem.custody_contract)?,
),
attr("LTV", whitelist_elem.max_ltv.to_string()),
]))
}
fn update_deposit_rate(deps: DepsMut, env: Env) -> StdResult<()> {
let dynrate_config: DynrateConfig = read_dynrate_config(deps.storage)?;
let dynrate_state: DynrateState = read_dynrate_state(deps.storage)?;
let mut config: Config = read_config(deps.storage)?;
// check whether its time to re-evaluate rate
if env.block.height >= dynrate_state.last_executed_height + dynrate_config.dyn_rate_epoch {
// retrieve interest buffer
let interest_buffer = query_balance(
deps.as_ref(),
env.contract.address.clone(),
config.stable_denom.to_string(),
)?;
// convert block rate into yearly rate
let blocks_per_year = Decimal256::from_ratio(Uint256::from(BLOCKS_PER_YEAR), 1);
let current_rate = config.threshold_deposit_rate * blocks_per_year;
let yield_reserve = Decimal256::from_uint256(interest_buffer);
let mut yr_went_up = yield_reserve > dynrate_state.prev_yield_reserve;
// amount yield reserve changed in notional terms
let yield_reserve_delta = if yr_went_up {
yield_reserve - dynrate_state.prev_yield_reserve
} else {
dynrate_state.prev_yield_reserve - yield_reserve
};
// amount yield reserve changed in percentage terms
// if the prev yield reserve was zero; assume either a 100% decrease
// or a 100% increase, but this should be very rare
let mut yield_reserve_change = if dynrate_state.prev_yield_reserve.is_zero() {
Decimal256::one()
} else {
yield_reserve_delta / dynrate_state.prev_yield_reserve
};
// decreases the yield reserve change by dyn_rate_yr_increase_expectation
// (assume (yr_went_up, yield_reserve_change) is one signed integer, this just subtracts
// that integer by dynrate_config.dyn_rate_yr_increase_expectation)
let increase_expectation = dynrate_config.dyn_rate_yr_increase_expectation;
yield_reserve_change = if !yr_went_up {
yield_reserve_change + increase_expectation
} else if yield_reserve_change > increase_expectation {
yield_reserve_change - increase_expectation
} else {
yr_went_up = !yr_went_up;
increase_expectation - yield_reserve_change
};
yield_reserve_change = min(yield_reserve_change, dynrate_config.dyn_rate_maxchange);
let mut new_rate = if yr_went_up {
current_rate + yield_reserve_change
} else if current_rate > yield_reserve_change {
current_rate - yield_reserve_change
} else {
Decimal256::zero()
};
// convert from yearly rate to block rate
new_rate = new_rate / blocks_per_year;
// clamp new rate
new_rate = max(
min(new_rate, dynrate_config.dyn_rate_max),
dynrate_config.dyn_rate_min,
);
config.target_deposit_rate = new_rate;
config.threshold_deposit_rate = new_rate;
store_config(deps.storage, &config)?;
// store updated epoch state
store_dynrate_state(
deps.storage,
&DynrateState {
last_executed_height: env.block.height,
prev_yield_reserve: yield_reserve,
},
)?;
};
Ok(())
}
pub fn execute_epoch_operations(deps: DepsMut, env: Env) -> Result<Response, ContractError> {
let config: Config = read_config(deps.storage)?;
let state: EpochState = read_epoch_state(deps.storage)?;
if env.block.height < state.last_executed_height + config.epoch_period {
return Err(ContractError::EpochNotPassed(state.last_executed_height));
}
// # of blocks from the last executed height
let blocks = Uint256::from(env.block.height - state.last_executed_height);
// Compute next epoch state
let market_contract = deps.api.addr_humanize(&config.market_contract)?;
let epoch_state: EpochStateResponse = query_epoch_state(
deps.as_ref(),
market_contract.clone(),
env.block.height,
None,
)?;
// effective_deposit_rate = cur_exchange_rate / prev_exchange_rate
// deposit_rate = (effective_deposit_rate - 1) / blocks
let effective_deposit_rate = epoch_state.exchange_rate / state.prev_exchange_rate;
let deposit_rate =
(effective_deposit_rate - Decimal256::one()) / Decimal256::from_uint256(blocks);
let mut messages: Vec<CosmosMsg> = vec![];
let mut interest_buffer = query_balance(
deps.as_ref(),
env.contract.address.clone(),
config.stable_denom.to_string(),
)?;
// Send accrued_buffer * config.anc_purchase_factor amount stable token to collector
let accrued_buffer = interest_buffer - state.prev_interest_buffer;
let anc_purchase_amount = accrued_buffer * config.anc_purchase_factor;
if !anc_purchase_amount.is_zero() {
messages.push(CosmosMsg::Bank(BankMsg::Send {
to_address: deps
.api
.addr_humanize(&config.collector_contract)?
.to_string(),
amount: vec![deduct_tax(
deps.as_ref(),
Coin {
denom: config.stable_denom.to_string(),
amount: anc_purchase_amount.into(),
},
)?],
}));
}
// Deduct anc_purchase_amount from the interest_buffer
interest_buffer = interest_buffer - anc_purchase_amount;
// Distribute Interest Buffer to depositor
// Only executed when deposit rate < threshold_deposit_rate
let mut distributed_interest: Uint256 = Uint256::zero();
if deposit_rate < config.threshold_deposit_rate {
// missing_deposit_rate(_per_block)
let missing_deposit_rate = config.threshold_deposit_rate - deposit_rate;
let prev_deposits = state.prev_aterra_supply * state.prev_exchange_rate;
// missing_deposits = prev_deposits * missing_deposit_rate(_per_block) * blocks
let missing_deposits = prev_deposits * blocks * missing_deposit_rate;
let distribution_buffer = interest_buffer * config.buffer_distribution_factor;
// When there was not enough deposits happens,
// distribute interest to market contract
distributed_interest = std::cmp::min(missing_deposits, distribution_buffer);
interest_buffer = interest_buffer - distributed_interest;
if !distributed_interest.is_zero() {
// deduct tax
distributed_interest = Uint256::from(
deduct_tax(
deps.as_ref(),
Coin {
denom: config.stable_denom.to_string(),
amount: distributed_interest.into(),
},
)?
.amount,
);
// Send some portion of interest buffer to Market contract
messages.push(CosmosMsg::Bank(BankMsg::Send {
to_address: market_contract.to_string(),
amount: vec![Coin {
denom: config.stable_denom,
amount: distributed_interest.into(),
}],
}));
}
}
// Execute DistributeRewards
let whitelist: Vec<WhitelistResponseElem> = read_whitelist(deps.as_ref(), None, None)?;
for elem in whitelist.iter() {
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: elem.custody_contract.clone(),
funds: vec![],
msg: to_binary(&CustodyExecuteMsg::DistributeRewards {})?,
}));
}
// TODO: Should this become a reply? If so which SubMsg to make reply_on?
// Execute store epoch state operation
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: env.contract.address.to_string(),
funds: vec![],
msg: to_binary(&ExecuteMsg::UpdateEpochState {
interest_buffer,
distributed_interest,
})?,
}));
Ok(Response::new().add_messages(messages).add_attributes(vec![
attr("action", "epoch_operations"),
attr("deposit_rate", deposit_rate.to_string()),
attr("exchange_rate", epoch_state.exchange_rate.to_string()),
attr("aterra_supply", epoch_state.aterra_supply),
attr("distributed_interest", distributed_interest),
attr("anc_purchase_amount", anc_purchase_amount),
]))
}
pub fn update_epoch_state(
deps: DepsMut,
env: Env,
info: MessageInfo,
// To store interest buffer before receiving epoch staking rewards,
// pass interest_buffer from execute_epoch_operations
interest_buffer: Uint256,
distributed_interest: Uint256,
) -> Result<Response, ContractError> {
let config: Config = read_config(deps.storage)?;
let overseer_epoch_state: EpochState = read_epoch_state(deps.storage)?;
if info.sender != env.contract.address {
return Err(ContractError::Unauthorized {});
}
// # of blocks from the last executed height
let blocks = Uint256::from(env.block.height - overseer_epoch_state.last_executed_height);
// Compute next epoch state
let market_contract = deps.api.addr_humanize(&config.market_contract)?;
let market_epoch_state: EpochStateResponse = query_epoch_state(
deps.as_ref(),
market_contract.clone(),
env.block.height,
Some(distributed_interest),
)?;
// effective_deposit_rate = cur_exchange_rate / prev_exchange_rate
// deposit_rate = (effective_deposit_rate - 1) / blocks
let effective_deposit_rate =
market_epoch_state.exchange_rate / overseer_epoch_state.prev_exchange_rate;
let deposit_rate =
(effective_deposit_rate - Decimal256::one()) / Decimal256::from_uint256(blocks);
// store updated epoch state
store_epoch_state(
deps.storage,
&EpochState {
last_executed_height: env.block.height,
prev_aterra_supply: market_epoch_state.aterra_supply,
prev_exchange_rate: market_epoch_state.exchange_rate,
prev_interest_buffer: interest_buffer,
deposit_rate,
},
)?;
// use unchanged rates to build msg
let response_msg = to_binary(&MarketExecuteMsg::ExecuteEpochOperations {
deposit_rate,
target_deposit_rate: config.target_deposit_rate,
threshold_deposit_rate: config.threshold_deposit_rate,
distributed_interest,
})?;
// proceed with deposit rate update
update_deposit_rate(deps, env)?;
Ok(Response::new()
.add_message(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: market_contract.to_string(),
funds: vec![],
msg: response_msg,
}))
.add_attributes(vec![
attr("action", "update_epoch_state"),
attr("deposit_rate", deposit_rate.to_string()),
attr("aterra_supply", market_epoch_state.aterra_supply),
attr(
"exchange_rate",
market_epoch_state.exchange_rate.to_string(),
),
attr("interest_buffer", interest_buffer),
]))
}
pub fn fund_reserve(deps: DepsMut, info: MessageInfo) -> Result<Response, ContractError> {
let sent_uusd = match info.funds.iter().find(|x| x.denom == "uusd") {
Some(coin) => coin.amount,
None => Uint128::zero(),
};
let mut overseer_epoch_state: EpochState = read_epoch_state(deps.storage)?;
overseer_epoch_state.prev_interest_buffer += Uint256::from(sent_uusd);
store_epoch_state(deps.storage, &overseer_epoch_state)?;
let mut dyn_rate_state: DynrateState = read_dynrate_state(deps.storage)?;
dyn_rate_state.prev_yield_reserve += Decimal256::from_ratio(Uint256::from(sent_uusd), 1);
store_dynrate_state(deps.storage, &dyn_rate_state)?;
Ok(Response::new().add_attributes(vec![
attr("action", "fund_reserve"),
attr("funded_amount", sent_uusd.to_string()),
]))
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::Config {} => to_binary(&query_config(deps)?),
QueryMsg::EpochState {} => to_binary(&query_state(deps)?),
QueryMsg::Whitelist {
collateral_token,
start_after,
limit,
} => to_binary(&query_whitelist(
deps,
optional_addr_validate(deps.api, collateral_token)?,
optional_addr_validate(deps.api, start_after)?,
limit,
)?),
QueryMsg::Collaterals { borrower } => to_binary(&query_collaterals(
deps,
deps.api.addr_validate(&borrower)?,
)?),
QueryMsg::AllCollaterals { start_after, limit } => to_binary(&query_all_collaterals(
deps,
optional_addr_validate(deps.api, start_after)?,
limit,
)?),
QueryMsg::BorrowLimit {
borrower,
block_time,
} => to_binary(&query_borrow_limit(
deps,
deps.api.addr_validate(&borrower)?,
block_time,
)?),
QueryMsg::DynrateState {} => to_binary(&query_dynrate_state(deps)?),
}
}
pub fn query_config(deps: Deps) -> StdResult<ConfigResponse> {
let config: Config = read_config(deps.storage)?;
let dynrate_config: DynrateConfig = read_dynrate_config(deps.storage)?;
Ok(ConfigResponse {
owner_addr: deps.api.addr_humanize(&config.owner_addr)?.to_string(),
oracle_contract: deps.api.addr_humanize(&config.oracle_contract)?.to_string(),
market_contract: deps.api.addr_humanize(&config.market_contract)?.to_string(),
liquidation_contract: deps
.api
.addr_humanize(&config.liquidation_contract)?
.to_string(),
collector_contract: deps
.api
.addr_humanize(&config.collector_contract)?
.to_string(),
stable_denom: config.stable_denom,
epoch_period: config.epoch_period,
threshold_deposit_rate: config.threshold_deposit_rate,
target_deposit_rate: config.target_deposit_rate,
buffer_distribution_factor: config.buffer_distribution_factor,
anc_purchase_factor: config.anc_purchase_factor,
price_timeframe: config.price_timeframe,
dyn_rate_epoch: dynrate_config.dyn_rate_epoch,
dyn_rate_maxchange: dynrate_config.dyn_rate_maxchange,
dyn_rate_yr_increase_expectation: dynrate_config.dyn_rate_yr_increase_expectation,
dyn_rate_min: dynrate_config.dyn_rate_min,
dyn_rate_max: dynrate_config.dyn_rate_max,
})
}
pub fn query_state(deps: Deps) -> StdResult<EpochState> {
read_epoch_state(deps.storage)
}
pub fn query_dynrate_state(deps: Deps) -> StdResult<DynrateState> {
read_dynrate_state(deps.storage)
}
pub fn query_whitelist(
deps: Deps,
collateral_token: Option<Addr>,
start_after: Option<Addr>,
limit: Option<u32>,
) -> StdResult<WhitelistResponse> {
if let Some(collateral_token) = collateral_token {
let whitelist_elem: WhitelistElem = read_whitelist_elem(
deps.storage,
&deps.api.addr_canonicalize(collateral_token.as_str())?,
)?;
Ok(WhitelistResponse {
elems: vec![WhitelistResponseElem {
name: whitelist_elem.name,
symbol: whitelist_elem.symbol,
max_ltv: whitelist_elem.max_ltv,
custody_contract: deps
.api
.addr_humanize(&whitelist_elem.custody_contract)?
.to_string(),
collateral_token: collateral_token.to_string(),
}],
})
} else {
let start_after = if let Some(start_after) = start_after {
Some(deps.api.addr_canonicalize(start_after.as_str())?)
} else {
None
};
let whitelist: Vec<WhitelistResponseElem> = read_whitelist(deps, start_after, limit)?;
Ok(WhitelistResponse { elems: whitelist })
}
}
| 36.5 | 97 | 0.647388 |
752b625f529c084ce3aa077a32bc92b5e1f693a8 | 8,578 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module provides a simplified abstraction for working with
//! code blocks identified by their integer node-id. In particular,
//! it captures a common set of attributes that all "function-like
//! things" (represented by `FnLike` instances) share. For example,
//! all `FnLike` instances have a type signature (be it explicit or
//! inferred). And all `FnLike` instances have a body, i.e. the code
//! that is run when the function-like thing it represents is invoked.
//!
//! With the above abstraction in place, one can treat the program
//! text as a collection of blocks of code (and most such blocks are
//! nested within a uniquely determined `FnLike`), and users can ask
//! for the `Code` associated with a particular NodeId.
pub use self::Code::*;
use front::map::{self, Node};
use syntax::abi;
use rustc_front::hir::{Block, FnDecl};
use syntax::ast::{Name, NodeId};
use rustc_front::hir as ast;
use syntax::codemap::Span;
use rustc_front::intravisit::FnKind;
/// An FnLikeNode is a Node that is like a fn, in that it has a decl
/// and a body (as well as a NodeId, a span, etc).
///
/// More specifically, it is one of either:
/// - A function item,
/// - A closure expr (i.e. an ExprClosure), or
/// - The default implementation for a trait method.
///
/// To construct one, use the `Code::from_node` function.
#[derive(Copy, Clone)]
pub struct FnLikeNode<'a> { node: map::Node<'a> }
/// MaybeFnLike wraps a method that indicates if an object
/// corresponds to some FnLikeNode.
pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; }
/// Components shared by fn-like things (fn items, methods, closures).
pub struct FnParts<'a> {
pub decl: &'a FnDecl,
pub body: &'a Block,
pub kind: FnKind<'a>,
pub span: Span,
pub id: NodeId,
}
impl MaybeFnLike for ast::Item {
fn is_fn_like(&self) -> bool {
match self.node { ast::ItemFn(..) => true, _ => false, }
}
}
impl MaybeFnLike for ast::TraitItem {
fn is_fn_like(&self) -> bool {
match self.node { ast::MethodTraitItem(_, Some(_)) => true, _ => false, }
}
}
impl MaybeFnLike for ast::Expr {
fn is_fn_like(&self) -> bool {
match self.node {
ast::ExprClosure(..) => true,
_ => false,
}
}
}
/// Carries either an FnLikeNode or a Block, as these are the two
/// constructs that correspond to "code" (as in, something from which
/// we can construct a control-flow graph).
#[derive(Copy, Clone)]
pub enum Code<'a> {
FnLikeCode(FnLikeNode<'a>),
BlockCode(&'a Block),
}
impl<'a> Code<'a> {
pub fn id(&self) -> NodeId {
match *self {
FnLikeCode(node) => node.id(),
BlockCode(block) => block.id,
}
}
/// Attempts to construct a Code from presumed FnLike or Block node input.
pub fn from_node(node: Node) -> Option<Code> {
if let map::NodeBlock(block) = node {
Some(BlockCode(block))
} else {
FnLikeNode::from_node(node).map(|fn_like| FnLikeCode(fn_like))
}
}
}
/// These are all the components one can extract from a fn item for
/// use when implementing FnLikeNode operations.
struct ItemFnParts<'a> {
name: Name,
decl: &'a ast::FnDecl,
unsafety: ast::Unsafety,
constness: ast::Constness,
abi: abi::Abi,
vis: ast::Visibility,
generics: &'a ast::Generics,
body: &'a Block,
id: NodeId,
span: Span
}
/// These are all the components one can extract from a closure expr
/// for use when implementing FnLikeNode operations.
struct ClosureParts<'a> {
decl: &'a FnDecl,
body: &'a Block,
id: NodeId,
span: Span
}
impl<'a> ClosureParts<'a> {
fn new(d: &'a FnDecl, b: &'a Block, id: NodeId, s: Span) -> ClosureParts<'a> {
ClosureParts { decl: d, body: b, id: id, span: s }
}
}
impl<'a> FnLikeNode<'a> {
/// Attempts to construct a FnLikeNode from presumed FnLike node input.
pub fn from_node(node: Node) -> Option<FnLikeNode> {
let fn_like = match node {
map::NodeItem(item) => item.is_fn_like(),
map::NodeTraitItem(tm) => tm.is_fn_like(),
map::NodeImplItem(_) => true,
map::NodeExpr(e) => e.is_fn_like(),
_ => false
};
if fn_like {
Some(FnLikeNode {
node: node
})
} else {
None
}
}
pub fn to_fn_parts(self) -> FnParts<'a> {
FnParts {
decl: self.decl(),
body: self.body(),
kind: self.kind(),
span: self.span(),
id: self.id(),
}
}
pub fn body(self) -> &'a Block {
self.handle(|i: ItemFnParts<'a>| &*i.body,
|_, _, _: &'a ast::MethodSig, _, body: &'a ast::Block, _| body,
|c: ClosureParts<'a>| c.body)
}
pub fn decl(self) -> &'a FnDecl {
self.handle(|i: ItemFnParts<'a>| &*i.decl,
|_, _, sig: &'a ast::MethodSig, _, _, _| &sig.decl,
|c: ClosureParts<'a>| c.decl)
}
pub fn span(self) -> Span {
self.handle(|i: ItemFnParts| i.span,
|_, _, _: &'a ast::MethodSig, _, _, span| span,
|c: ClosureParts| c.span)
}
pub fn id(self) -> NodeId {
self.handle(|i: ItemFnParts| i.id,
|id, _, _: &'a ast::MethodSig, _, _, _| id,
|c: ClosureParts| c.id)
}
pub fn kind(self) -> FnKind<'a> {
let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis)
};
let closure = |_: ClosureParts| {
FnKind::Closure
};
let method = |_, name: Name, sig: &'a ast::MethodSig, vis, _, _| {
FnKind::Method(name, sig, vis)
};
self.handle(item, method, closure)
}
fn handle<A, I, M, C>(self, item_fn: I, method: M, closure: C) -> A where
I: FnOnce(ItemFnParts<'a>) -> A,
M: FnOnce(NodeId,
Name,
&'a ast::MethodSig,
Option<ast::Visibility>,
&'a ast::Block,
Span)
-> A,
C: FnOnce(ClosureParts<'a>) -> A,
{
match self.node {
map::NodeItem(i) => match i.node {
ast::ItemFn(ref decl, unsafety, constness, abi, ref generics, ref block) =>
item_fn(ItemFnParts {
id: i.id,
name: i.name,
decl: &decl,
unsafety: unsafety,
body: &block,
generics: generics,
abi: abi,
vis: i.vis,
constness: constness,
span: i.span
}),
_ => panic!("item FnLikeNode that is not fn-like"),
},
map::NodeTraitItem(ti) => match ti.node {
ast::MethodTraitItem(ref sig, Some(ref body)) => {
method(ti.id, ti.name, sig, None, body, ti.span)
}
_ => panic!("trait method FnLikeNode that is not fn-like"),
},
map::NodeImplItem(ii) => {
match ii.node {
ast::ImplItemKind::Method(ref sig, ref body) => {
method(ii.id, ii.name, sig, Some(ii.vis), body, ii.span)
}
_ => {
panic!("impl method FnLikeNode that is not fn-like")
}
}
}
map::NodeExpr(e) => match e.node {
ast::ExprClosure(_, ref decl, ref block) =>
closure(ClosureParts::new(&decl, &block, e.id, e.span)),
_ => panic!("expr FnLikeNode that is not fn-like"),
},
_ => panic!("other FnLikeNode that is not fn-like"),
}
}
}
| 33.507813 | 91 | 0.534507 |
bfa9abfdf7db9d22bb129fb44e4e1a838251fae6 | 7,811 | mod anonymous_query_handler;
mod bitcoin;
mod canister_manager;
mod canister_settings;
pub mod execution;
mod execution_environment;
mod execution_environment_metrics;
mod history;
mod hypervisor;
mod ingress_filter;
mod metrics;
mod query_handler;
mod scheduler;
mod types;
pub mod util;
use crate::anonymous_query_handler::AnonymousQueryHandler;
pub use execution_environment::{
CanisterHeartbeatError, ExecutionEnvironment, ExecutionEnvironmentImpl,
};
pub use history::{IngressHistoryReaderImpl, IngressHistoryWriterImpl};
pub use hypervisor::{Hypervisor, HypervisorMetrics};
use ic_btc_canister::BitcoinCanister;
use ic_config::{execution_environment::Config, subnet_config::SchedulerConfig};
use ic_cycles_account_manager::CyclesAccountManager;
use ic_interfaces::execution_environment::AnonymousQueryService;
use ic_interfaces::execution_environment::{
IngressFilterService, IngressHistoryReader, IngressHistoryWriter, QueryExecutionService,
QueryHandler, Scheduler,
};
use ic_interfaces_state_manager::StateReader;
use ic_logger::ReplicaLogger;
use ic_metrics::MetricsRegistry;
use ic_registry_subnet_type::SubnetType;
use ic_replicated_state::{NetworkTopology, ReplicatedState};
use ic_types::{messages::CallContextId, SubnetId};
use ingress_filter::IngressFilter;
use query_handler::{HttpQueryHandler, InternalHttpQueryHandler};
use scheduler::SchedulerImpl;
use std::sync::{Arc, Mutex};
const MAX_BUFFERED_QUERIES: usize = 2000;
const CONCURRENT_QUERIES_PER_THREAD: usize = 4;
/// When executing a wasm method of query type, this enum indicates if we are
/// running in an replicated or non-replicated context. This information is
/// needed for various purposes and in particular to support the CoW memory
/// work.
#[doc(hidden)]
pub enum QueryExecutionType {
/// The execution is happening in a replicated context (i.e. consensus was
/// used to agree that this method should be executed). This should
/// generally indicate that the message being handled in an Ingress or an
/// inter-canister Request.
Replicated,
/// The execution is happening in a non-replicated context (i.e. consensus
/// was not used to agree that this method should be executed). This should
/// generally indicate that the message being handled is a Query message.
NonReplicated {
call_context_id: CallContextId,
network_topology: Arc<NetworkTopology>,
query_kind: NonReplicatedQueryKind,
},
}
/// This enum indicates whether execution of a non-replicated query
/// should keep track of the state or not.
#[doc(hidden)]
#[derive(Clone, PartialEq, Eq)]
pub enum NonReplicatedQueryKind {
Stateful,
Pure,
}
// This struct holds public facing components that are created by Execution.
pub struct ExecutionServices {
pub ingress_filter: IngressFilterService,
pub ingress_history_writer: Arc<dyn IngressHistoryWriter<State = ReplicatedState>>,
pub ingress_history_reader: Box<dyn IngressHistoryReader>,
pub sync_query_handler: Arc<dyn QueryHandler<State = ReplicatedState>>,
pub async_query_handler: QueryExecutionService,
pub anonymous_query_handler: AnonymousQueryService,
pub scheduler: Box<dyn Scheduler<State = ReplicatedState>>,
}
impl ExecutionServices {
/// Constructs the public facing components that the
/// `ExecutionEnvironment` crate exports.
#[allow(clippy::type_complexity, clippy::too_many_arguments)]
pub fn setup_execution(
logger: ReplicaLogger,
metrics_registry: &MetricsRegistry,
own_subnet_id: SubnetId,
own_subnet_type: SubnetType,
scheduler_config: SchedulerConfig,
config: Config,
cycles_account_manager: Arc<CyclesAccountManager>,
state_reader: Arc<dyn StateReader<State = ReplicatedState>>,
) -> ExecutionServices {
let hypervisor = Arc::new(Hypervisor::new(
config.clone(),
metrics_registry,
own_subnet_id,
own_subnet_type,
logger.clone(),
Arc::clone(&cycles_account_manager),
));
let ingress_history_writer = Arc::new(IngressHistoryWriterImpl::new(
config.clone(),
logger.clone(),
metrics_registry,
));
let ingress_history_reader =
Box::new(IngressHistoryReaderImpl::new(Arc::clone(&state_reader)));
let exec_env = Arc::new(ExecutionEnvironmentImpl::new(
logger.clone(),
Arc::clone(&hypervisor),
Arc::clone(&ingress_history_writer) as Arc<_>,
metrics_registry,
own_subnet_id,
own_subnet_type,
scheduler_config.scheduler_cores,
config.clone(),
Arc::clone(&cycles_account_manager),
));
let sync_query_handler = Arc::new(InternalHttpQueryHandler::new(
logger.clone(),
hypervisor,
own_subnet_type,
config.clone(),
metrics_registry,
scheduler_config.max_instructions_per_message,
));
let threadpool = threadpool::Builder::new()
.num_threads(config.query_execution_threads)
.thread_name("query_execution".into())
.thread_stack_size(8_192_000)
.build();
let threadpool = Arc::new(Mutex::new(threadpool));
let async_query_handler = HttpQueryHandler::new_service(
MAX_BUFFERED_QUERIES,
config.query_execution_threads * CONCURRENT_QUERIES_PER_THREAD,
Arc::clone(&sync_query_handler) as Arc<_>,
Arc::clone(&threadpool),
Arc::clone(&state_reader),
);
let ingress_filter = IngressFilter::new_service(
MAX_BUFFERED_QUERIES,
config.query_execution_threads * CONCURRENT_QUERIES_PER_THREAD,
Arc::clone(&threadpool),
Arc::clone(&state_reader),
Arc::clone(&exec_env),
);
let anonymous_query_handler = AnonymousQueryHandler::new_service(
MAX_BUFFERED_QUERIES,
config.query_execution_threads * CONCURRENT_QUERIES_PER_THREAD,
threadpool,
Arc::clone(&state_reader),
Arc::clone(&exec_env),
scheduler_config.max_instructions_per_message,
);
let bitcoin_canister = Arc::new(BitcoinCanister::new(metrics_registry, logger.clone()));
let scheduler = Box::new(SchedulerImpl::new(
scheduler_config,
own_subnet_id,
Arc::clone(&ingress_history_writer) as Arc<_>,
Arc::clone(&exec_env) as Arc<_>,
Arc::clone(&cycles_account_manager),
bitcoin_canister,
metrics_registry,
logger,
config.rate_limiting_of_heap_delta,
config.rate_limiting_of_instructions,
));
Self {
ingress_filter,
ingress_history_writer,
ingress_history_reader,
sync_query_handler,
async_query_handler,
anonymous_query_handler,
scheduler,
}
}
#[allow(clippy::type_complexity)]
pub fn into_parts(
self,
) -> (
IngressFilterService,
Arc<dyn IngressHistoryWriter<State = ReplicatedState>>,
Box<dyn IngressHistoryReader>,
Arc<dyn QueryHandler<State = ReplicatedState>>,
QueryExecutionService,
AnonymousQueryService,
Box<dyn Scheduler<State = ReplicatedState>>,
) {
(
self.ingress_filter,
self.ingress_history_writer,
self.ingress_history_reader,
self.sync_query_handler,
self.async_query_handler,
self.anonymous_query_handler,
self.scheduler,
)
}
}
| 35.666667 | 96 | 0.676866 |
16fbcccbc2f548ccec44c050eef500beea1f0d20 | 994 | //! Http protocol support.
pub mod body;
mod builder;
pub mod client;
mod config;
#[cfg(feature = "compress")]
pub mod encoding;
pub(crate) mod helpers;
mod httpcodes;
mod httpmessage;
mod message;
mod payload;
mod request;
mod response;
mod service;
pub mod error;
pub mod h1;
pub mod h2;
pub mod header;
pub mod test;
pub(crate) use self::message::Message;
pub use self::builder::HttpServiceBuilder;
pub use self::client::Client;
pub use self::config::{DateService, KeepAlive, ServiceConfig};
pub use self::error::ResponseError;
pub use self::header::HeaderMap;
pub use self::httpmessage::HttpMessage;
pub use self::message::{ConnectionType, RequestHead, RequestHeadType, ResponseHead};
pub use self::payload::{Payload, PayloadStream};
pub use self::request::Request;
pub use self::response::{Response, ResponseBuilder};
pub use self::service::HttpService;
pub use crate::io::types::HttpProtocol;
// re-exports
pub use http::uri::{self, Uri};
pub use http::{Method, StatusCode, Version};
| 24.243902 | 84 | 0.751509 |
1ef60c0176df96d8c12099a30cde0099612cef41 | 3,558 | use crate::tasks::{TaskRunner, TaskRunnerInner};
use crate::FlutterEngineInner;
use log::trace;
use parking_lot::Mutex;
use std::os::raw::{c_char, c_uint, c_void};
pub extern "C" fn present(user_data: *mut c_void) -> bool {
trace!("present");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.swap_buffers()
}
}
pub extern "C" fn make_current(user_data: *mut c_void) -> bool {
trace!("make_current");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.make_current()
}
}
pub extern "C" fn clear_current(user_data: *mut c_void) -> bool {
trace!("clear_current");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.clear_current()
}
}
pub extern "C" fn fbo_callback(user_data: *mut c_void) -> c_uint {
trace!("fbo_callback");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.fbo_callback()
}
}
pub extern "C" fn make_resource_current(user_data: *mut c_void) -> bool {
trace!("make_resource_current");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.make_resource_current()
}
}
pub extern "C" fn gl_proc_resolver(user_data: *mut c_void, proc: *const c_char) -> *mut c_void {
trace!("gl_proc_resolver");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine.opengl_handler.gl_proc_resolver(proc)
}
}
pub extern "C" fn platform_message_callback(
platform_message: *const flutter_engine_sys::FlutterPlatformMessage,
user_data: *mut c_void,
) {
trace!("platform_message_callback");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
engine
.channel_registry
.read()
.handle((*platform_message).into());
}
}
pub extern "C" fn root_isolate_create_callback(_user_data: *mut c_void) {
trace!("root_isolate_create_callback");
// // This callback is executed on the main thread
// unsafe {
// let user_data = &mut *(user_data as *mut DesktopUserData);
// if let DesktopUserData::WindowState(window_state) = user_data {
// window_state.set_isolate_created();
// }
// }
}
pub extern "C" fn runs_task_on_current_thread(user_data: *mut c_void) -> bool {
trace!("runs_task_on_current_thread");
unsafe {
let inner = &*(user_data as *const Mutex<TaskRunnerInner>);
inner.lock().runs_task_on_current_thread()
}
}
pub extern "C" fn post_task(
task: flutter_engine_sys::FlutterTask,
target_time_nanos: u64,
user_data: *mut c_void,
) {
trace!("post_task");
unsafe {
let inner = &*(user_data as *const Mutex<TaskRunnerInner>);
let mut inner = inner.lock();
TaskRunner::post_task(&mut inner, task, target_time_nanos);
}
}
pub extern "C" fn gl_external_texture_frame(
user_data: *mut c_void,
texture_id: i64,
width: usize,
height: usize,
texture: *mut flutter_engine_sys::FlutterOpenGLTexture,
) -> bool {
trace!("gl_external_texture_frame");
unsafe {
let engine = &*(user_data as *const FlutterEngineInner);
if let Some(frame) = engine
.texture_registry
.get_texture_frame(texture_id, (width, height))
{
frame.into_ffi(&mut *texture);
return true;
}
false
}
}
| 29.404959 | 96 | 0.643339 |
b914cde90098349081117e0c96047b7270af02cf | 910 | use cef_simple::{Cef, WindowOptions};
use simplelog::*;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let cef = Cef::initialize(Some(8000), false)?;
CombinedLogger::init(vec![TermLogger::new(
LevelFilter::Trace,
Config::default(),
TerminalMode::Mixed,
simplelog::ColorChoice::Always
)])
.unwrap();
let page = urlencoding::encode(include_str!("page.html"));
cef.open_window(WindowOptions {
url: format!("data:text/html,{}", page),
title: Some("CEF Simple—On-Screen-Keyboard Demo".to_string()),
window_icon: Some(include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/icon.png"
))),
window_app_icon: Some(include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/icon.png"
))),
..WindowOptions::default()
})?;
cef.run()?;
Ok(())
}
| 26 | 70 | 0.564835 |
1dd75b52facd32a85392be8aa5c1395f5110750b | 4,088 | pub mod attributes;
pub mod global_variable;
pub mod linkage;
pub mod metadata;
pub mod name;
pub mod parser;
pub mod preemption_specifier;
pub mod unnamed_addr;
pub mod visibility;
pub use parser::parse as parse_assembly;
use super::{
function::{Function, FunctionId, Parameter},
types::{TypeId, Types},
};
use attributes::Attribute;
use global_variable::GlobalVariable;
use id_arena::{Arena, Id};
use metadata::Metadata;
use name::Name;
use rustc_hash::FxHashMap;
use std::fmt;
#[derive(Debug, Clone)]
pub struct Target {
triple: String,
datalayout: String,
}
pub struct Module {
pub(crate) name: String,
pub(crate) source_filename: String,
pub(crate) target: Target,
pub(crate) functions: Arena<Function>,
pub(crate) attributes: FxHashMap<u32, Vec<Attribute>>,
pub(crate) global_variables: FxHashMap<Name, GlobalVariable>,
pub types: Types,
pub metas: FxHashMap<Name, Metadata>,
}
impl Default for Module {
fn default() -> Self {
Self {
name: "".to_string(),
source_filename: "".to_string(),
target: Target::new(),
functions: Arena::new(),
attributes: FxHashMap::default(),
global_variables: FxHashMap::default(),
types: Types::new(),
metas: FxHashMap::default(),
}
}
}
impl Module {
pub fn new() -> Self {
Self::default()
}
pub fn name(&self) -> &String {
&self.name
}
pub fn source_filename(&self) -> &String {
&self.source_filename
}
pub fn target(&self) -> &Target {
&self.target
}
pub fn functions(&self) -> &Arena<Function> {
&self.functions
}
pub fn functions_mut(&mut self) -> &mut Arena<Function> {
&mut self.functions
}
pub fn attributes(&self) -> &FxHashMap<u32, Vec<Attribute>> {
&self.attributes
}
pub fn global_variables(&self) -> &FxHashMap<Name, GlobalVariable> {
&self.global_variables
}
pub fn add_function(&mut self, f: Function) -> Id<Function> {
self.functions.alloc(f)
}
pub fn create_function<T: AsRef<str>>(
&mut self,
name: T,
result_ty: TypeId,
params: Vec<Parameter>,
is_var_arg: bool,
) -> Id<Function> {
self.functions.alloc(Function::new(
name,
result_ty,
params,
is_var_arg,
self.types.clone(),
))
}
pub fn find_function_by_name<T: AsRef<str>>(&self, name: T) -> Option<FunctionId> {
for (id, func) in &self.functions {
if func.name() == name.as_ref() {
return Some(id);
}
}
None
}
}
impl Default for Target {
fn default() -> Self {
Self {
triple: "".to_string(),
datalayout: "".to_string(),
}
}
}
impl Target {
pub fn new() -> Self {
Self::default()
}
pub fn triple(&self) -> &str {
self.triple.as_str()
}
pub fn datalayout(&self) -> &str {
self.datalayout.as_str()
}
}
impl fmt::Debug for Module {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "source_filename = \"{}\"", self.source_filename)?;
writeln!(f, "target datalayout = \"{}\"", self.target.datalayout)?;
writeln!(f, "target triple = \"{}\"", self.target.triple)?;
writeln!(f)?;
write!(f, "{:?}", self.types)?;
for gv in self.global_variables.values() {
writeln!(f, "{}", gv.to_string(&self.types))?;
}
writeln!(f)?;
for (_, func) in &self.functions {
writeln!(f, "{:?}", func)?;
}
for (id, attrs) in &self.attributes {
write!(f, "attributes #{} = {{ ", id)?;
for attr in attrs {
write!(f, "{:?} ", attr)?;
}
writeln!(f, "}}")?
}
for (n, meta) in &self.metas {
writeln!(f, "!{} = {:?}", n, meta)?;
}
Ok(())
}
}
| 24.047059 | 87 | 0.536204 |
391d6186dda61b52c8441615fdb662bc18928f2c | 1,132 | //https://cses.fi/alon/result/2273368/
use std::io::{BufRead};
use std::{io,cmp};
//some ideas from here https://www.youtube.com/watch?v=TOsD3BkIKoQ (in c++)
const MOD:usize = 1000000000 +7;
fn main() {
let stdin = io::stdin();
let mut iter_line = stdin.lock().lines();
let number = iter_line
.next()
.unwrap()
.expect("failed to read first line")
.parse::<usize>().unwrap();
let mut res:Vec<Vec<usize>>=Vec::with_capacity(number+1);
let size = cmp::max(2,number*number);
let mut interm:Vec<usize>=Vec::with_capacity(size/2);
for _i in 0..size/2 {
interm.push(0);
}
for _i in 0..number+1 {
res.push(interm.to_owned());
}
res[0][0]=1;
for i in 1..=number {
for x in 0..=(number*(number+1))/4 {
if x < i {
res[i][x]=res[i-1][x]%MOD;
} else {
res[i][x]=(res[i-1][x-i] + res[i-1][x])%MOD;
}
}
}
if (number*(number+1)%4) != 0 {
print!("{}",0);
} else {
print!("{}",(res[number][number*(number+1)/4]*500000004)%MOD);
}
}
| 28.3 | 76 | 0.507067 |
8a9b89af5f329c4d6debd3fa393e3d600404c9c4 | 434 | // primitive_types3.rs
// Create an array with at least 100 elements in it where the ??? is.
// Execute `rustlings hint primitive_types3` for hints!
// I AM NOT DONE
// let a = [0..100];
// println!("{}", a.len());
// Would print 1 here
fn main() {
let a = 0..100;
if a.len() >= 100 {
println!("Wow, that's a big array!");
} else {
println!("Meh, I eat arrays like that for breakfast.");
}
}
| 24.111111 | 69 | 0.56682 |
75cecfec4ccb32a207b7d58c9a06e911f140d4d3 | 11,444 | //! Definitions related to [VOD] files cache.
//!
//! [VOD]: https://en.wikipedia.org/wiki/Video_on_demand
use std::{
panic::AssertUnwindSafe,
path::{self, Path, PathBuf},
};
use anyhow::anyhow;
use ephyr_log::log;
use futures::{sink, FutureExt as _, StreamExt as _, TryStreamExt as _};
use tempfile::TempDir;
use tokio::{fs, io, sync::mpsc};
use tokio_util::compat::FuturesAsyncReadCompatExt as _;
use url::Url;
use crate::util::display_panic;
/// Manager of [VOD] files cache.
///
/// It downloads the requested URLs in background and returns their path once
/// they appear in cache.
///
/// [VOD]: https://en.wikipedia.org/wiki/Video_on_demand
#[derive(Debug)]
pub struct Manager {
/// Absolute path to the directory where cache files are downloaded to and
/// persisted in.
cache_dir: PathBuf,
/// Queue of tasks to perform downloading.
downloads: mpsc::UnboundedSender<Url>,
/// Directory where temporary downloading files are created.
///
/// It cleans up automatically on [`Drop`].
///
/// The path where this directory will be created can be manipulated via
/// [`TMPDIR` env var][1].
///
/// [1]: https://en.wikipedia.org/wiki/TMPDIR
_tmp_dir: TempDir,
}
impl Manager {
/// Number of maximum allowed concurrent downloads at the same time.
pub const CONCURRENT_DOWNLOADS: usize = 4;
/// Creates new [`Manager`] running the background downloads queue
/// processing.
///
/// # Errors
///
/// - If specified `dir` doesn't exist or cannot be resolved.
/// - If temporary directory cannot be created.
pub fn try_new<P: AsRef<Path>>(dir: P) -> io::Result<Self> {
let cache_dir = dir.as_ref().canonicalize()?;
let tmp_dir = tempfile::Builder::new()
.prefix("ephyr-vod-cache.")
.tempdir()?;
let (tx, rx) = mpsc::unbounded_channel::<Url>();
drop(tokio::spawn(Self::run_downloads(
rx,
cache_dir.clone(),
tmp_dir.path().to_owned(),
)));
Ok(Self {
cache_dir,
downloads: tx,
_tmp_dir: tmp_dir,
})
}
/// Returns the path of a cached file for the given [`Url`], if there is in
/// cache any.
///
/// If there is no cached file for the given [`Url`], then schedules it for
/// downloading.
///
/// # Errors
///
/// - If the given [`Url`] is not supported for downloading.
/// - If the given [`Url`] cannot be scheduled for downloading.
pub async fn get_cached_path(
&self,
url: &Url,
) -> Result<Option<PathBuf>, anyhow::Error> {
let full =
self.cache_dir
.join(Self::url_to_relative_path(url).ok_or_else(|| {
anyhow!("Unsupported downloading URL: {}", url)
})?);
match fs::metadata(&full).await {
Ok(m) if m.is_file() => match full.strip_prefix(&self.cache_dir) {
Ok(m) => Ok(Some(m.to_owned())),
Err(e) => Err(anyhow!(
"Failed to strip prefix of '{}' path: {}",
full.display(),
e
)),
},
Err(e) if e.kind() != io::ErrorKind::NotFound => Err(anyhow!(
"Failed to check '{}' file existence: {}",
full.display(),
e,
)),
_ => {
self.downloads.send(url.clone()).map_err(|e| {
anyhow!(
"Failed to schedule '{}' URL for downloading: {}",
url,
e,
)
})?;
Ok(None)
}
}
}
/// Runs job, which awaits for new [`Url`]s for downloading and performs
/// at most [`Manager::CONCURRENT_DOWNLOADS`] count of downloads at the same
/// moment.
///
/// The job finishes once [`Manager`] is dropped.
async fn run_downloads(
downloads: mpsc::UnboundedReceiver<Url>,
dst: PathBuf,
tmp: PathBuf,
) {
let _ = downloads
.map(move |url| {
let dst = dst.clone();
let tmp = tmp.clone();
async move {
AssertUnwindSafe(Self::download(&url, &dst, &tmp))
.catch_unwind()
.await
.map_err(|p| {
log::error!(
"Panicked while downloading '{}' URL to VOD \
cache: {}",
url,
display_panic(&p),
);
})
.unwrap_err();
}
})
.buffer_unordered(Self::CONCURRENT_DOWNLOADS)
.map(Ok)
.forward(sink::drain())
.await;
}
/// Downloads the given [`Url`] into `dst_dir` using `tmp_dir` for keeping
/// temporary file while downloading happens.
///
/// The temporary file is required to avoid any problems with partially
/// downloaded files. That's why, first, the file is downloaded into
/// `tmp_dir`, and only after downloading is fully complete, it's moved
/// to `dst_dir`.
///
/// # Errors
///
/// - If file in `tmp_dir` or `dst_dir` cannot be created.
/// - If the given [`Url`] couldn't be reached or responses with non-success
/// HTTP code.
/// - If downloading of file from the given [`Url`] fails or is interrupted.
#[allow(clippy::too_many_lines)]
async fn download(
url: &Url,
dst_dir: &Path,
tmp_dir: &Path,
) -> Result<(), anyhow::Error> {
let rel_path = Self::url_to_relative_path(url)
.ok_or_else(|| anyhow!("Unsupported downloading URL: {}", url))?;
let dst_path = dst_dir.join(&rel_path);
// Early check whether file was downloaded already.
if matches!(
fs::metadata(&dst_path).await.map(|m| m.is_file()),
Ok(true)
) {
log::debug!(
"URL '{}' already downloaded to '{}' VOD cache file, skipping",
url,
dst_path.display(),
);
return Ok(());
}
let tmp_path = tmp_dir.join(&rel_path);
// Early check whether file is downloading at the moment.
if matches!(
fs::metadata(&tmp_path).await.map(|m| m.is_file()),
Ok(true)
) {
log::debug!(
"URL '{}' already downloading at the moment to '{}' VOD cache \
file, skipping",
url,
dst_path.display(),
);
return Ok(());
}
// Prepare parent directory for temporary file.
if let Some(path) = tmp_path.as_path().parent() {
fs::create_dir_all(path).await.map_err(|e| {
anyhow!("Failed to create '{}' dir: {}", path.display(), e)
})?;
}
log::info!(
"Start downloading '{}' URL to '{}' VOD cache file",
url,
dst_path.display(),
);
let mut resp = reqwest::get(url.clone())
.await
.map_err(|e| anyhow!("Failed to perform GET '{}': {}", url, e))?
.error_for_status()
.map_err(|e| anyhow!("Bad response for GET '{}': {}", url, e))?
.bytes_stream()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
.into_async_read()
.compat();
let tmp_file = fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(&tmp_path)
.await
.map(Some)
.or_else(|e| {
if let io::ErrorKind::NotFound = e.kind() {
Ok(None)
} else {
Err(e)
}
})
.map_err(|e| {
anyhow!("Failed to create '{}' file: {}", tmp_path.display(), e)
})?;
if tmp_file.is_none() {
return Ok(());
}
let mut tmp_file = tmp_file.unwrap();
let _ = io::copy(&mut resp, &mut tmp_file).await.map_err(|e| {
anyhow!(
"Failed to download into '{}' file: {}",
tmp_path.display(),
e,
)
})?;
match fs::metadata(&dst_path).await {
// Check whether file has been downloaded concurrently.
Ok(m) if m.is_file() => {
log::info!(
"URL '{}' has been already concurrently downloaded to '{}' \
VOD cache file, skipping",
url,
dst_path.display(),
);
return Ok(());
}
// Remove if there is a directory with the same name.
Ok(m) if m.is_dir() => {
fs::remove_dir_all(&dst_path).await.map_err(|e| {
anyhow!(
"Failed to remove '{}' dir: {}",
dst_path.display(),
e,
)
})?;
}
_ => {}
}
// Prepare parent directory for destination file.
if let Some(path) = dst_path.as_path().parent() {
fs::create_dir_all(path).await.map_err(|e| {
anyhow!("Failed to create '{}' dir: {}", path.display(), e)
})?;
}
if fs::rename(&tmp_path, &dst_path).await.is_err() {
// If moving file has failed (due to moving onto another physical
// disk, for example), then try to copy and delete it explicitly.
let _ = fs::copy(&tmp_path, &dst_path).await.map_err(|e| {
anyhow!(
"Failed to move downloaded file from '{}' to '{}': {}",
tmp_path.display(),
dst_path.display(),
e,
)
})?;
fs::remove_file(&tmp_path).await.map_err(|e| {
anyhow!(
"Failed to remove '{}' file: {}",
tmp_path.display(),
e,
)
})?;
}
log::info!(
"Successfully downloaded URL '{}' to '{}' VOD cache file",
url,
dst_path.display(),
);
Ok(())
}
/// Extracts path of the file in cache from the given [`Url`].
///
/// If [`None`] is returned, then such [`Url`] is not supported for
/// downloading.
#[must_use]
pub fn url_to_relative_path(url: &Url) -> Option<PathBuf> {
let prefix = match url.host() {
Some(url::Host::Domain("api.allatra.video")) => "/storage/videos",
_ => return None,
};
let path = Path::new(url.path()).strip_prefix(prefix).ok()?;
// Path with `..` segments are not supported due to security reasons:
// provided URL should never give a possibility to point outside the
// `Manager::cache_dir`.
if path.components().any(|c| c == path::Component::ParentDir) {
return None;
}
Some(path.to_owned())
}
}
| 33.364431 | 80 | 0.479553 |
29362087b85ed1b51510f418d871fe05b5fac386 | 1,224 | pub fn compute() {
let mut stretch = stretch2::Stretch::new();
let node0 = stretch
.new_node(
stretch2::style::Style {
flex_grow: 0.2f32,
flex_shrink: 0f32,
flex_basis: stretch2::style::Dimension::Points(40f32),
..Default::default()
},
&[],
)
.unwrap();
let node1 = stretch
.new_node(stretch2::style::Style { flex_grow: 0.2f32, flex_shrink: 0f32, ..Default::default() }, &[])
.unwrap();
let node2 = stretch
.new_node(stretch2::style::Style { flex_grow: 0.4f32, flex_shrink: 0f32, ..Default::default() }, &[])
.unwrap();
let node = stretch
.new_node(
stretch2::style::Style {
size: stretch2::geometry::Size {
width: stretch2::style::Dimension::Points(500f32),
height: stretch2::style::Dimension::Points(200f32),
..Default::default()
},
..Default::default()
},
&[node0, node1, node2],
)
.unwrap();
stretch.compute_layout(node, stretch2::geometry::Size::undefined()).unwrap();
}
| 34.971429 | 109 | 0.496732 |
fbd657728c4a6a073ee4ff6aa212d91289a26101 | 2,720 | use heapless::Vec;
/// A user-defined URC matcher
///
/// This is used to detect and consume URCs that are not terminated with
/// standard response codes like "OK". An example could be an URC that returns
/// length-value (LV) encoded data without a terminator.
///
/// Note that you should only detect and consume but not process URCs.
/// Processing should be done by an [`AtatUrc`](trait.AtatUrc.html)
/// implementation.
///
/// A very simplistic example that can only handle the URC `+FOO,xx` (with
/// `xx` being two arbitrary characters) followed by CRLF:
///
/// ```
/// use atat::{UrcMatcher, UrcMatcherResult};
/// use heapless::Vec;
///
/// struct FooUrcMatcher {}
///
/// impl UrcMatcher for FooUrcMatcher {
/// fn process<const L: usize>(&mut self, buf: &mut Vec<u8, L>) -> UrcMatcherResult<L> {
/// if buf.starts_with(b"+FOO,") {
/// if buf.len() >= 9 {
/// if &buf[7..9] == b"\r\n" {
/// // URC is complete
/// let data = Vec::from_slice(&buf[..9]).unwrap();
/// *buf = Vec::from_slice(&buf[9..]).unwrap();
/// UrcMatcherResult::Complete(data)
/// } else {
/// // Invalid, reject
/// UrcMatcherResult::NotHandled
/// }
/// } else {
/// // Insufficient data
/// UrcMatcherResult::Incomplete
/// }
/// } else {
/// UrcMatcherResult::NotHandled
/// }
/// }
/// }
/// ```
pub trait UrcMatcher {
/// Take a look at `buf`. Then:
///
/// - If the buffer contains a full URC, remove these bytes from the buffer
/// and return [`Complete`] with the matched data.
/// - If it contains an incomplete URC, return [`Incomplete`].
/// - Otherwise, return [`NotHandled`].
///
/// [`Complete`]: enum.UrcMatcherResult.html#variant.Complete
/// [`Incomplete`]: enum.UrcMatcherResult.html#variant.Incomplete
/// [`NotHandled`]: enum.UrcMatcherResult.html#variant.NotHandled
fn process<const L: usize>(&mut self, buf: &mut Vec<u8, L>) -> UrcMatcherResult<L>;
}
/// The type returned from a custom URC matcher.
pub enum UrcMatcherResult<const L: usize> {
NotHandled,
Incomplete,
Complete(Vec<u8, L>),
}
/// A URC matcher that does nothing (it always returns [`NotHandled`][nothandled]).
///
/// [nothandled]: enum.UrcMatcherResult.html#variant.NotHandled
#[derive(Debug, Default)]
pub struct DefaultUrcMatcher;
impl UrcMatcher for DefaultUrcMatcher {
fn process<const L: usize>(&mut self, _: &mut Vec<u8, L>) -> UrcMatcherResult<L> {
UrcMatcherResult::NotHandled
}
}
| 35.324675 | 92 | 0.583824 |
b9bdd9eb257ba2b16d978f595f052507e8fe7d6a | 10,472 | #![allow(clippy::needless_doctest_main)]
#![doc(html_logo_url = "https://yew.rs/img/logo.png")]
#![cfg_attr(documenting, feature(doc_cfg))]
//! # Yew Framework - API Documentation
//!
//! Yew is a modern Rust framework for creating multi-threaded front-end web apps using WebAssembly
//!
//! - Features a macro for declaring interactive HTML with Rust expressions. Developers who have
//! experience using JSX in React should feel quite at home when using Yew.
//! - Achieves high performance by minimizing DOM API calls for each page render and by making it
//! easy to offload processing to background web workers.
//! - Supports JavaScript interoperability, allowing developers to leverage NPM packages and
//! integrate with existing JavaScript applications.
//!
//! ### Supported Targets (Client-Side Rendering)
//! - `wasm32-unknown-unknown`
//!
//! ### Note
//!
//! Server-Side Rendering should work on all targets when feature `ssr` is enabled.
//!
//! ### Supported Features:
//! - `csr`: Enables Client-side Rendering support and [`Renderer`]. Only enable this feature if you
//! are making a Yew application (not a library).
//! - `ssr`: Enables Server-side Rendering support and [`ServerRenderer`].
//! - `tokio`: Enables future-based APIs on non-wasm32 targets with tokio runtime. (You may want to
//! enable this if your application uses future-based APIs and it does not compile / lint on
//! non-wasm32 targets.)
//! - `hydration`: Enables Hydration support.
//!
//! ## Example
//!
//! ```rust
//! use yew::prelude::*;
//!
//! enum Msg {
//! AddOne,
//! }
//!
//! struct App {
//! value: i64,
//! }
//!
//! impl Component for App {
//! type Message = Msg;
//! type Properties = ();
//!
//! fn create(ctx: &Context<Self>) -> Self {
//! Self { value: 0 }
//! }
//!
//! fn update(&mut self, _ctx: &Context<Self>, msg: Self::Message) -> bool {
//! match msg {
//! Msg::AddOne => {
//! self.value += 1;
//! true
//! }
//! }
//! }
//!
//! fn view(&self, ctx: &Context<Self>) -> Html {
//! html! {
//! <div>
//! <button onclick={ctx.link().callback(|_| Msg::AddOne)}>{ "+1" }</button>
//! <p>{ self.value }</p>
//! </div>
//! }
//! }
//! }
//!
//! # fn dont_execute() {
//! fn main() {
//! yew::Renderer::<App>::new().render();
//! }
//! # }
//! ```
#![deny(
missing_docs,
missing_debug_implementations,
bare_trait_objects,
anonymous_parameters,
elided_lifetimes_in_paths
)]
#![allow(macro_expanded_macro_exports_accessed_by_absolute_paths)]
#![recursion_limit = "512"]
extern crate self as yew;
/// This macro provides a convenient way to create [`Classes`].
///
/// The macro takes a list of items similar to the [`vec!`] macro and returns a [`Classes`]
/// instance. Each item can be of any type that implements `Into<Classes>` (See the
/// implementations on [`Classes`] to learn what types can be used).
///
/// # Example
///
/// ```
/// # use yew::prelude::*;
/// # fn test() {
/// let conditional_class = Some("my-other-class");
/// let vec_of_classes = vec![
/// "one-bean",
/// "two-beans",
/// "three-beans",
/// "a-very-small-casserole",
/// ];
///
/// html! {
/// <div class={classes!("my-container-class", conditional_class, vec_of_classes)}>
/// // ...
/// </div>
/// };
/// # }
/// ```
pub use yew_macro::classes;
/// This macro implements JSX-like templates.
///
/// This macro always returns [`Html`].
/// If you need to preserve the type of a component, use the [`html_nested!`] macro instead.
///
/// More information about using the `html!` macro can be found in the [Yew Docs]
///
/// [`Html`]: ./html/type.Html.html
/// [`html_nested!`]: ./macro.html_nested.html
/// [Yew Docs]: https://yew.rs/docs/next/concepts/html
pub use yew_macro::html;
/// This macro is similar to [`html!`], but preserves the component type instead
/// of wrapping it in [`Html`].
///
/// That macro is useful when, for example, in a typical implementation of a list
/// component (let's assume it's called `List`).
/// In a typical implementation you might find two component types -- `List` and `ListItem`.
/// Only `ListItem` components are allowed to be children of List`.
///
/// You can find an example implementation of this in the [`nested_list`] example.
/// That example shows, how to create static lists with their children.
///
/// ```
/// # use yew::prelude::*;
/// use yew::html::ChildrenRenderer;
/// use yew::virtual_dom::VChild;
///
/// #[derive(Clone, Properties, PartialEq)]
/// struct ListProps {
/// children: ChildrenRenderer<ListItem>,
/// }
///
/// struct List;
/// impl Component for List {
/// # type Message = ();
/// type Properties = ListProps;
/// // ...
/// # fn create(ctx: &Context<Self>) -> Self { Self }
/// # fn view(&self, ctx: &Context<Self>) -> Html { unimplemented!() }
/// }
///
/// #[derive(Clone, PartialEq)]
/// struct ListItem;
/// impl Component for ListItem {
/// # type Message = ();
/// # type Properties = ();
/// // ...
/// # fn create(ctx: &Context<Self>) -> Self { Self }
/// # fn view(&self, ctx: &Context<Self>) -> Html { unimplemented!() }
/// }
///
/// // Required for ChildrenRenderer
/// impl From<VChild<ListItem>> for ListItem {
/// fn from(child: VChild<ListItem>) -> Self {
/// Self
/// }
/// }
///
/// impl Into<Html> for ListItem {
/// fn into(self) -> Html {
/// html! { <self /> }
/// }
/// }
/// // You can use `List` with nested `ListItem` components.
/// // Using any other kind of element would result in a compile error.
/// # fn test() -> Html {
/// html! {
/// <List>
/// <ListItem/>
/// <ListItem/>
/// <ListItem/>
/// </List>
/// }
/// # }
/// # fn test_iter() -> Html {
/// # let some_iter = (0..10);
/// // In many cases you might want to create the content dynamically.
/// // To do this, you can use the following code:
/// html! {
/// <List>
/// { for some_iter.map(|_| html_nested!{ <ListItem/> }) }
/// </List>
/// }
/// # }
/// ```
///
/// If you used the [`html!`] macro instead of `html_nested!`, the code would
/// not compile because we explicitly indicated to the compiler that `List`
/// can only contain elements of type `ListItem` using [`ChildrenRenderer<ListItem>`],
/// while [`html!`] creates items of type [`Html`].
///
///
/// [`html!`]: ./macro.html.html
/// [`Html`]: ./html/type.Html.html
/// [`nested_list`]: https://github.com/yewstack/yew/tree/master/examples/nested_list
/// [`ChildrenRenderer<ListItem>`]: ./html/struct.ChildrenRenderer.html
pub use yew_macro::html_nested;
/// Build [`Properties`] outside of the [`html!`] macro.
///
/// It's already possible to create properties like normal Rust structs
/// but if there are lots of optional props the end result is often needlessly verbose.
/// This macro allows you to build properties the same way the [`html!`] macro does.
///
/// The macro doesn't support special props like `ref` and `key`, they need to be set in the
/// [`html!`] macro.
///
/// You can read more about `Properties` in the [Yew Docs].
///
/// # Example
///
/// ```
/// # use yew::prelude::*;
/// use std::borrow::Cow;
///
/// #[derive(Clone, Properties, PartialEq)]
/// struct Props {
/// #[prop_or_default]
/// id: usize,
/// name: Cow<'static, str>,
/// }
///
/// struct MyComponent(Props);
/// impl Component for MyComponent {
/// # type Message = ();
/// type Properties = Props;
/// // ...
/// # fn create(ctx: &Context<Self>) -> Self { unimplemented!() }
/// # fn view(&self, ctx: &Context<Self>) -> Html { unimplemented!() }
/// }
///
/// # fn foo() -> Html {
/// // You can build props directly ...
/// let props = yew::props!(Props {
/// name: Cow::from("Minka")
/// });
/// # assert_eq!(props.name, "Minka");
/// // ... or build the associated properties of a component
/// let props = yew::props!(MyComponent::Properties {
/// id: 2,
/// name: Cow::from("Lemmy")
/// });
/// # assert_eq!(props.id, 2);
///
/// // Use the Rust-like struct update syntax to create a component with the props.
/// html! {
/// <MyComponent key=1 ..props />
/// }
/// # }
/// ```
///
/// [`html!`]: ./macro.html.html
/// [`Properties`]: ./html/trait.Properties.html
/// [Yew Docs]: https://yew.rs/concepts/components/properties
pub use yew_macro::props;
/// This module contains macros which implements html! macro and JSX-like templates
pub mod macros {
pub use crate::{classes, html, html_nested, props};
}
pub mod callback;
pub mod context;
#[cfg(feature = "csr")]
mod dom_bundle;
pub mod functional;
pub mod html;
mod io_coop;
pub mod scheduler;
mod sealed;
#[cfg(feature = "ssr")]
mod server_renderer;
pub mod suspense;
pub mod utils;
pub mod virtual_dom;
#[cfg(feature = "ssr")]
pub use server_renderer::*;
#[cfg(feature = "csr")]
mod app_handle;
#[cfg(feature = "csr")]
mod renderer;
#[cfg(feature = "csr")]
#[cfg(test)]
pub mod tests;
/// The module that contains all events available in the framework.
pub mod events {
#[doc(no_inline)]
pub use web_sys::{
AnimationEvent, DragEvent, ErrorEvent, Event, FocusEvent, InputEvent, KeyboardEvent,
MouseEvent, PointerEvent, ProgressEvent, TouchEvent, TransitionEvent, UiEvent, WheelEvent,
};
#[cfg(feature = "csr")]
pub use crate::dom_bundle::set_event_bubbling;
pub use crate::html::TargetCast;
}
#[cfg(feature = "csr")]
pub use crate::app_handle::AppHandle;
#[cfg(feature = "csr")]
pub use crate::renderer::{set_custom_panic_hook, Renderer};
pub mod prelude {
//! The Yew Prelude
//!
//! The purpose of this module is to alleviate imports of many common types:
//!
//! ```
//! # #![allow(unused_imports)]
//! use yew::prelude::*;
//! ```
#[cfg(feature = "csr")]
pub use crate::app_handle::AppHandle;
pub use crate::callback::Callback;
pub use crate::context::{ContextHandle, ContextProvider};
pub use crate::events::*;
pub use crate::functional::*;
pub use crate::html::{
create_portal, BaseComponent, Children, ChildrenWithProps, Classes, Component, Context,
Html, HtmlResult, NodeRef, Properties,
};
pub use crate::macros::{classes, html, html_nested};
pub use crate::suspense::Suspense;
pub use crate::virtual_dom::AttrValue;
}
pub use self::prelude::*;
| 30.178674 | 100 | 0.610294 |
33da2791e5cb108b988917823b6167b22722e661 | 2,033 | //! This example shows how to send a query to watchman and print out the files
//! changed since the given timestamp.
use std::path::PathBuf;
use structopt::StructOpt;
use watchman_client::prelude::*;
#[derive(Debug, StructOpt)]
#[structopt(about = "Query files changed since a timestamp")]
struct Opt {
#[structopt()]
/// Specifies the clock. Use `watchman clock <PATH>` to retrieve the current clock of a watched
/// directory
clock: String,
#[structopt(short, long)]
/// [not recommended] Uses Unix timestamp as clock
unix_timestamp: bool,
#[structopt(short, long, default_value = ".")]
/// Specifies the path to watched directory
path: PathBuf,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if let Err(err) = run().await {
// Print a prettier error than the default
eprintln!("{}", err);
std::process::exit(1);
}
Ok(())
}
async fn run() -> Result<(), Box<dyn std::error::Error>> {
let opt = Opt::from_args();
let client = Connector::new().connect().await?;
let resolved = client
.resolve_root(CanonicalPath::canonicalize(opt.path)?)
.await?;
let clock_spec = if opt.unix_timestamp {
// it is better to use watchman's clock rather than Unix timestamp.
// see `watchman_client::pdu::ClockSpec::unix_timestamp` for details.
ClockSpec::UnixTimestamp(opt.clock.parse()?)
} else {
ClockSpec::StringClock(opt.clock)
};
let result = client
.query::<NameOnly>(
&resolved,
QueryRequestCommon {
since: Some(Clock::Spec(clock_spec.clone())),
..Default::default()
},
)
.await?;
eprintln!("Clock is now: {:?}", result.clock);
if let Some(files) = result.files {
for file in files.iter() {
println!("{}", file.name.display());
}
} else {
eprintln!("no file changed since {:?}", clock_spec);
}
Ok(())
}
| 28.236111 | 99 | 0.591244 |
fea110ba860943dc0371e44df6faf39635e03633 | 2,081 | use crate::{
etl::{
collector::{Collector, OPTIMAL_BUFFER_CAPACITY},
data_provider::Entry,
},
kv::tables,
models::*,
stagedsync::stage::{ExecOutput, Stage, StageInput},
Cursor, MutableTransaction, StageId,
};
use async_trait::async_trait;
use tokio::pin;
use tokio_stream::StreamExt;
use tracing::*;
#[derive(Debug)]
pub struct BlockHashes;
#[async_trait]
impl<'db, RwTx> Stage<'db, RwTx> for BlockHashes
where
RwTx: MutableTransaction<'db>,
{
fn id(&self) -> StageId {
StageId("BlockHashes")
}
fn description(&self) -> &'static str {
"Generating BlockHashes => BlockNumber Mapping"
}
async fn execute<'tx>(&self, tx: &'tx mut RwTx, input: StageInput) -> anyhow::Result<ExecOutput>
where
'db: 'tx,
{
let mut bodies_cursor = tx.mutable_cursor(&tables::BlockBody).await?;
let mut blockhashes_cursor = tx.mutable_cursor(&tables::HeaderNumber.erased()).await?;
let mut highest_block = input.stage_progress.unwrap_or(BlockNumber(0));
let mut collector = Collector::new(OPTIMAL_BUFFER_CAPACITY);
let walker = bodies_cursor.walk(Some(highest_block + 1));
pin!(walker);
while let Some(((block_number, block_hash), _)) = walker.try_next().await? {
if block_number.0 % 50_000 == 0 {
info!("Processing block {}", block_number);
}
// BlockBody Key is block_number + hash, so we just separate and collect
collector.collect(Entry::new(block_hash, block_number));
highest_block = block_number;
}
collector.load(&mut blockhashes_cursor).await?;
Ok(ExecOutput::Progress {
stage_progress: highest_block,
done: true,
must_commit: true,
})
}
async fn unwind<'tx>(
&self,
tx: &'tx mut RwTx,
input: crate::stagedsync::stage::UnwindInput,
) -> anyhow::Result<()>
where
'db: 'tx,
{
let _ = tx;
let _ = input;
todo!()
}
}
| 28.121622 | 100 | 0.596348 |
eb71698d001c579de8da7e79689d677de10bfdca | 518 | #[tokio::main]
async fn main() -> anyhow::Result<()> {
let mut storage = zksync_storage::StorageProcessor::establish_connection().await?;
storage
.chain()
.operations_schema()
.calculate_priority_ops_hashes()
.await?;
println!("Priority op hashes were calculated");
storage
.chain()
.operations_schema()
.calculate_batch_hashes()
.await?;
println!("Tx hashes for priority ops and batches are successfully calculated");
Ok(())
}
| 25.9 | 86 | 0.621622 |
39456a971edc72c90b73afbf3e4fe4adeb34f18e | 3,385 | #![warn(rust_2018_idioms)]
use bytes::buf::{BufExt, BufMutExt};
use bytes::{Buf, BufMut, Bytes};
#[cfg(feature = "std")]
use std::io::IoSlice;
use crates_unittest::test_case;
use std::prelude::v1::*;
#[test_case]
fn collect_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res = a.chain(b).to_bytes();
assert_eq!(res, &b"helloworld"[..]);
}
#[test_case]
fn writing_chained() {
let mut a = [0u8; 64];
let mut b = [0u8; 64];
{
let mut buf = (&mut a[..]).chain_mut(&mut b[..]);
for i in 0u8..128 {
buf.put_u8(i);
}
}
for i in 0..64 {
let expect = i as u8;
assert_eq!(expect, a[i]);
assert_eq!(expect + 64, b[i]);
}
}
#[test_case]
fn iterating_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res: Vec<u8> = a.chain(b).into_iter().collect();
assert_eq!(res, &b"helloworld"[..]);
}
#[cfg(feature = "std")]
#[test_case]
fn vectored_read() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let mut buf = a.chain(b);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"hello"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(2);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"llo"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"world"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"ld"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
}
| 24.708029 | 57 | 0.441064 |
7a4e164b62197942d8a1f91c4fdb231bb1f4beb7 | 7,243 | //! Preset color filters.
extern crate image;
use crate::colour_spaces;
use crate::colour_spaces::mix_with_colour;
use crate::effects::{adjust_contrast, inc_brightness};
use crate::{helpers, monochrome};
use crate::{PhotonImage, Rgb};
use image::{GenericImage, GenericImageView};
use wasm_bindgen::prelude::*;
/// Solarization on the Blue channel.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::neue(&mut img);
/// ```
#[wasm_bindgen]
pub fn neue(mut photon_image: &mut PhotonImage) {
let end = photon_image.get_raw_pixels().len() - 4;
for i in (0..end).step_by(4) {
let b_val = photon_image.raw_pixels[i + 2];
if 255 as i32 - b_val as i32 > 0 {
photon_image.raw_pixels[i + 2] = 255 - b_val;
}
}
}
/// Solarization on the Red and Green channels.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::lix(&mut img);
/// ```
#[wasm_bindgen]
pub fn lix(mut photon_image: &mut PhotonImage) {
let end = photon_image.get_raw_pixels().len() - 4;
for i in (0..end).step_by(4) {
let r_val = photon_image.raw_pixels[i];
let g_val = photon_image.raw_pixels[i + 1];
photon_image.raw_pixels[i] = 255 - r_val;
photon_image.raw_pixels[i + 1] = 255 - g_val;
}
}
/// Solarization on the Red and Blue channels.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::ryo(&mut img);
/// ```
#[wasm_bindgen]
pub fn ryo(mut photon_image: &mut PhotonImage) {
let end = photon_image.get_raw_pixels().len() - 4;
for i in (0..end).step_by(4) {
let r_val = photon_image.raw_pixels[i];
let b_val = photon_image.raw_pixels[i + 2];
photon_image.raw_pixels[i] = 255 - r_val;
photon_image.raw_pixels[i + 2] = 255 - b_val;
}
}
/// Apply a filter to an image. Over 20 filters are available.
/// The filters are as follows:
/// * **oceanic**: Add an aquamarine-tinted hue to an image.
/// * **islands**: Aquamarine tint.
/// * **marine**: Add a green/blue mixed hue to an image.
/// * **seagreen**: Dark green hue, with tones of blue.
/// * **flagblue**: Royal blue tint
/// * **liquid**: Blue-inspired tint.
/// * **diamante**: Custom filter with a blue/turquoise tint.
/// * **radio**: Fallout-style radio effect.
/// * **twenties**: Slight-blue tinted historical effect.
/// * **rosetint**: Rose-tinted filter.
/// * **mauve**: Purple-infused filter.
/// * **bluechrome**: Blue monochrome effect.
/// * **vintage**: Vintage filter with a red tint.
/// * **perfume**: Increase the blue channel, with moderate increases in the Red and Green channels.
/// * **serenity**: Custom filter with an increase in the Blue channel's values.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `filter_name` - The filter's name. Choose from the selection above, eg: "oceanic"
/// # Example
///
/// ```
/// // For example, to add a filter called "vintage" to an image:
/// use photon::filters;
/// photon::filters::filter(&mut img, "vintage");
/// ```
#[wasm_bindgen]
pub fn filter(img: &mut PhotonImage, filter_name: &str) {
let oceanic_rgb = Rgb::new(0, 89, 173);
let islands_rgb = Rgb::new(0, 24, 95);
let marine_rgb = Rgb::new(0, 14, 119);
let seagreen_rgb = Rgb::new(0, 68, 62);
let flagblue_rgb = Rgb::new(0, 0, 131);
let diamante_rgb = Rgb::new(30, 82, 87);
let liquid_rgb = Rgb::new(0, 10, 75);
let vintage_rgb = Rgb::new(120, 70, 13);
let perfume_rgb = Rgb::new(80, 40, 120);
let serenity_rgb = Rgb::new(10, 40, 90);
match filter_name {
// Match filter name to its corresponding function.
"oceanic" => mix_with_colour(img, oceanic_rgb, 0.2),
"islands" => mix_with_colour(img, islands_rgb, 0.2),
"marine" => mix_with_colour(img, marine_rgb, 0.2),
"seagreen" => mix_with_colour(img, seagreen_rgb, 0.2),
"flagblue" => mix_with_colour(img, flagblue_rgb, 0.2),
"diamante" => mix_with_colour(img, diamante_rgb, 0.1),
"liquid" => mix_with_colour(img, liquid_rgb, 0.2),
"radio" => monochrome::monochrome(img, 5, 40, 20),
"twenties" => monochrome::monochrome(img, 18, 12, 20),
"rosetint" => monochrome::monochrome(img, 80, 20, 31),
"mauve" => monochrome::monochrome(img, 90, 40, 80),
"bluechrome" => monochrome::monochrome(img, 20, 30, 60),
"vintage" => mix_with_colour(img, vintage_rgb, 0.2),
"perfume" => mix_with_colour(img, perfume_rgb, 0.2),
"serenity" => mix_with_colour(img, serenity_rgb, 0.2),
"golden" => golden(img),
"pastel_pink" => pastel_pink(img),
"cali" => cali(img),
"dramatic" => dramatic(img),
"firenze" => firenze(img),
"obsidian" => obsidian(img),
"lofi" => lofi(img),
_ => monochrome::monochrome(img, 90, 40, 80),
};
}
/// Apply a lofi effect to an image.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::lofi(&mut img);
/// ```
#[wasm_bindgen]
pub fn lofi(img: &mut PhotonImage) {
adjust_contrast(img, 30.0);
colour_spaces::saturate_hsl(img, 0.2);
}
/// Add a rose tint to an image.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::pastel_pink(&mut img);
/// ```
#[wasm_bindgen]
pub fn pastel_pink(img: &mut PhotonImage) {
let pastel_pink_rgb = Rgb::new(220, 112, 170);
mix_with_colour(img, pastel_pink_rgb, 0.1);
adjust_contrast(img, 30.0);
}
/// Apply a vintage, golden hue to an image.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::golden(&mut img);
/// ```
#[wasm_bindgen]
pub fn golden(img: &mut PhotonImage) {
let vignette_rgb = Rgb::new(235, 145, 50);
mix_with_colour(img, vignette_rgb, 0.2);
adjust_contrast(img, 30.0);
}
/// Increased contrast filter effect.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::cali(&mut img);
/// ```
#[wasm_bindgen]
pub fn cali(img: &mut PhotonImage) {
let cali_rgb = Rgb::new(255, 45, 75);
colour_spaces::mix_with_colour(img, cali_rgb, 0.1);
adjust_contrast(img, 50.0);
}
/// Increased contrast, greyscale effect.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::dramatic(&mut img);
/// ```
#[wasm_bindgen]
pub fn dramatic(img: &mut PhotonImage) {
monochrome::grayscale(img);
adjust_contrast(img, 60.0);
}
/// Apply a red hue, with increased contrast and brightness.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::firenze(&mut img);
/// ```
#[wasm_bindgen]
pub fn firenze(img: &mut PhotonImage) {
let cali_rgb = Rgb::new(255, 47, 78);
colour_spaces::mix_with_colour(img, cali_rgb, 0.1);
inc_brightness(img, 30);
adjust_contrast(img, 50.0);
}
/// Apply a greyscale effect with increased contrast.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// # Example
///
/// ```
/// photon::filters::obsidian(&mut img);
/// ```
#[wasm_bindgen]
pub fn obsidian(img: &mut PhotonImage) {
monochrome::grayscale(img);
adjust_contrast(img, 25.0);
}
| 28.403922 | 100 | 0.609554 |
698005b5d3068a10ddae00340a5e79ffd3f1bc8b | 820 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue 46112: An extern crate pub re-exporting libcore was causing
// paths rooted from `std` to be misrendered in the diagnostic output.
// ignore-windows
// aux-build:xcrate_issue_46112_rexport_core.rs
extern crate xcrate_issue_46112_rexport_core;
fn test(r: Result<Option<()>, &'static str>) { }
fn main() { test(Ok(())); }
//~^ mismatched types
| 39.047619 | 70 | 0.740244 |
0e857c848582873de7e8ea6559b0d5d4c3cdeb2f | 7,166 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Object store that represents the Local File System.
use std::fs::{self, File, Metadata};
use std::io::{BufReader, Read, Seek, SeekFrom};
use std::sync::Arc;
use async_trait::async_trait;
use futures::{stream, AsyncRead, StreamExt};
use crate::datasource::object_store::{
FileMeta, FileMetaStream, ListEntryStream, ObjectReader, ObjectStore,
};
use crate::datasource::PartitionedFile;
use crate::error::DataFusionError;
use crate::error::Result;
use super::{ObjectReaderStream, SizedFile};
#[derive(Debug)]
/// Local File System as Object Store.
pub struct LocalFileSystem;
#[async_trait]
impl ObjectStore for LocalFileSystem {
async fn list_file(&self, prefix: &str) -> Result<FileMetaStream> {
list_all(prefix.to_owned()).await
}
async fn list_dir(
&self,
_prefix: &str,
_delimiter: Option<String>,
) -> Result<ListEntryStream> {
todo!()
}
fn file_reader(&self, file: SizedFile) -> Result<Arc<dyn ObjectReader>> {
Ok(Arc::new(LocalFileReader::new(file)?))
}
}
struct LocalFileReader {
file: SizedFile,
}
impl LocalFileReader {
fn new(file: SizedFile) -> Result<Self> {
Ok(Self { file })
}
}
#[async_trait]
impl ObjectReader for LocalFileReader {
async fn chunk_reader(
&self,
_start: u64,
_length: usize,
) -> Result<Box<dyn AsyncRead>> {
todo!(
"implement once async file readers are available (arrow-rs#78, arrow-rs#111)"
)
}
fn sync_chunk_reader(
&self,
start: u64,
length: usize,
) -> Result<Box<dyn Read + Send + Sync>> {
// A new file descriptor is opened for each chunk reader.
// This okay because chunks are usually fairly large.
let mut file = File::open(&self.file.path)?;
file.seek(SeekFrom::Start(start))?;
let file = BufReader::new(file.take(length as u64));
Ok(Box::new(file))
}
fn length(&self) -> u64 {
self.file.size
}
}
async fn list_all(prefix: String) -> Result<FileMetaStream> {
fn get_meta(path: String, metadata: Metadata) -> FileMeta {
FileMeta {
sized_file: SizedFile {
path,
size: metadata.len(),
},
last_modified: metadata.modified().map(chrono::DateTime::from).ok(),
}
}
async fn find_files_in_dir(
path: String,
to_visit: &mut Vec<String>,
) -> Result<Vec<FileMeta>> {
let mut dir = tokio::fs::read_dir(path).await?;
let mut files = Vec::new();
while let Some(child) = dir.next_entry().await? {
if let Some(child_path) = child.path().to_str() {
let metadata = child.metadata().await?;
if metadata.is_dir() {
to_visit.push(child_path.to_string());
} else {
files.push(get_meta(child_path.to_owned(), metadata))
}
} else {
return Err(DataFusionError::Plan("Invalid path".to_string()));
}
}
Ok(files)
}
let prefix_meta = tokio::fs::metadata(&prefix).await?;
let prefix = prefix.to_owned();
if prefix_meta.is_file() {
Ok(Box::pin(stream::once(async move {
Ok(get_meta(prefix, prefix_meta))
})))
} else {
let result = stream::unfold(vec![prefix], move |mut to_visit| async move {
match to_visit.pop() {
None => None,
Some(path) => {
let file_stream = match find_files_in_dir(path, &mut to_visit).await {
Ok(files) => stream::iter(files).map(Ok).left_stream(),
Err(e) => stream::once(async { Err(e) }).right_stream(),
};
Some((file_stream, to_visit))
}
}
})
.flatten();
Ok(Box::pin(result))
}
}
/// Create a stream of `ObjectReader` by converting each file in the `files` vector
/// into instances of `LocalFileReader`
pub fn local_object_reader_stream(files: Vec<String>) -> ObjectReaderStream {
Box::pin(futures::stream::iter(files).map(|f| Ok(local_object_reader(f))))
}
/// Helper method to convert a file location to a `LocalFileReader`
pub fn local_object_reader(file: String) -> Arc<dyn ObjectReader> {
LocalFileSystem
.file_reader(local_unpartitioned_file(file).file_meta.sized_file)
.expect("File not found")
}
/// Helper method to fetch the file size and date at given path and create a `FileMeta`
pub fn local_unpartitioned_file(file: String) -> PartitionedFile {
let metadata = fs::metadata(&file).expect("Local file metadata");
PartitionedFile {
file_meta: FileMeta {
sized_file: SizedFile {
size: metadata.len(),
path: file,
},
last_modified: metadata.modified().map(chrono::DateTime::from).ok(),
},
partition_values: vec![],
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::StreamExt;
use std::collections::HashSet;
use std::fs::create_dir;
use std::fs::File;
use tempfile::tempdir;
#[tokio::test]
async fn test_recursive_listing() -> Result<()> {
// tmp/a.txt
// tmp/x/b.txt
// tmp/y/c.txt
let tmp = tempdir()?;
let x_path = tmp.path().join("x");
let y_path = tmp.path().join("y");
let a_path = tmp.path().join("a.txt");
let b_path = x_path.join("b.txt");
let c_path = y_path.join("c.txt");
create_dir(&x_path)?;
create_dir(&y_path)?;
File::create(&a_path)?;
File::create(&b_path)?;
File::create(&c_path)?;
let mut all_files = HashSet::new();
let mut files = list_all(tmp.path().to_str().unwrap().to_string()).await?;
while let Some(file) = files.next().await {
let file = file?;
assert_eq!(file.size(), 0);
all_files.insert(file.path().to_owned());
}
assert_eq!(all_files.len(), 3);
assert!(all_files.contains(a_path.to_str().unwrap()));
assert!(all_files.contains(b_path.to_str().unwrap()));
assert!(all_files.contains(c_path.to_str().unwrap()));
Ok(())
}
}
| 31.292576 | 90 | 0.593916 |
6af5dddf98563c18059850385dee8116fb8e3a19 | 303 | pub mod channel_config;
pub mod message_channel;
pub mod message_list_header;
pub mod message_manager;
pub mod ordered_reliable_receiver;
pub mod reliable_receiver;
pub mod reliable_sender;
pub mod unordered_reliable_receiver;
pub mod unordered_unreliable_receiver;
pub mod unordered_unreliable_sender;
| 27.545455 | 38 | 0.867987 |
ed0519fb3c1136b7f2926df76c5dc9bdb737c2a6 | 6,370 | //! Tests for Unicode support and correct cursor transformation.
pub mod common;
use anyhow::Result;
use common::*;
use log::info;
use operational_transform::OperationSeq;
use rustpad_server::server;
use serde_json::json;
#[tokio::test]
async fn test_unicode_length() -> Result<()> {
pretty_env_logger::try_init().ok();
let filter = server();
expect_text(&filter, "unicode", "").await;
let mut client = connect(&filter, "unicode").await?;
let msg = client.recv().await?;
assert_eq!(msg, json!({ "Identity": 0 }));
let mut operation = OperationSeq::default();
operation.insert("h🎉e🎉l👨👨👦👦lo");
let msg = json!({
"Edit": {
"revision": 0,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
let msg = client.recv().await?;
assert_eq!(
msg,
json!({
"History": {
"start": 0,
"operations": [
{ "id": 0, "operation": ["h🎉e🎉l👨👨👦👦lo"] }
]
}
})
);
info!("testing that text length is equal to number of Unicode code points...");
let mut operation = OperationSeq::default();
operation.delete(14);
let msg = json!({
"Edit": {
"revision": 1,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
let msg = client.recv().await?;
assert_eq!(
msg,
json!({
"History": {
"start": 1,
"operations": [
{ "id": 0, "operation": [-14] }
]
}
})
);
expect_text(&filter, "unicode", "").await;
Ok(())
}
#[tokio::test]
async fn test_multiple_operations() -> Result<()> {
pretty_env_logger::try_init().ok();
let filter = server();
expect_text(&filter, "unicode", "").await;
let mut client = connect(&filter, "unicode").await?;
let msg = client.recv().await?;
assert_eq!(msg, json!({ "Identity": 0 }));
let mut operation = OperationSeq::default();
operation.insert("🎉😍𒀇👨👨👦👦"); // Emoticons and Cuneiform
let msg = json!({
"Edit": {
"revision": 0,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
let msg = client.recv().await?;
assert_eq!(
msg,
json!({
"History": {
"start": 0,
"operations": [
{ "id": 0, "operation": ["🎉😍𒀇👨👨👦👦"] }
]
}
})
);
let mut operation = OperationSeq::default();
operation.insert("👯♂️");
operation.retain(3);
operation.insert("𐅣𐅤𐅥"); // Ancient Greek numbers
operation.retain(7);
let msg = json!({
"Edit": {
"revision": 1,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
let msg = client.recv().await?;
assert_eq!(
msg,
json!({
"History": {
"start": 1,
"operations": [
{ "id": 0, "operation": ["👯♂️", 3, "𐅣𐅤𐅥", 7] }
]
}
})
);
expect_text(&filter, "unicode", "👯♂️🎉😍𒀇𐅣𐅤𐅥👨👨👦👦").await;
let mut operation = OperationSeq::default();
operation.retain(2);
operation.insert("h̷̙̤̏͊̑̍̆̃̉͝ĕ̶̠̌̓̃̓̽̃̚l̸̥̊̓̓͝͠l̸̨̠̣̟̥͠ỏ̴̳̖̪̟̱̰̥̞̙̏̓́͗̽̀̈́͛͐̚̕͝͝ ̶̡͍͙͚̞͙̣̘͙̯͇̙̠̀w̷̨̨̪͚̤͙͖̝͕̜̭̯̝̋̋̿̿̀̾͛̐̏͘͘̕͝ǒ̴̙͉͈̗̖͍̘̥̤̒̈́̒͠r̶̨̡̢̦͔̙̮̦͖͔̩͈̗̖̂̀l̶̡̢͚̬̤͕̜̀͛̌̈́̈́͑͋̈̍̇͊͝͠ď̵̛̛̯͕̭̩͖̝̙͎̊̏̈́̎͊̐̏͊̕͜͝͠͝"); // Lots of ligatures
operation.retain(8);
let msg = json!({
"Edit": {
"revision": 1,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
let msg = client.recv().await?;
assert_eq!(
msg,
json!({
"History": {
"start": 2,
"operations": [
{ "id": 0, "operation": [6, "h̷̙̤̏͊̑̍̆̃̉͝ĕ̶̠̌̓̃̓̽̃̚l̸̥̊̓̓͝͠l̸̨̠̣̟̥͠ỏ̴̳̖̪̟̱̰̥̞̙̏̓́͗̽̀̈́͛͐̚̕͝͝ ̶̡͍͙͚̞͙̣̘͙̯͇̙̠̀w̷̨̨̪͚̤͙͖̝͕̜̭̯̝̋̋̿̿̀̾͛̐̏͘͘̕͝ǒ̴̙͉͈̗̖͍̘̥̤̒̈́̒͠r̶̨̡̢̦͔̙̮̦͖͔̩͈̗̖̂̀l̶̡̢͚̬̤͕̜̀͛̌̈́̈́͑͋̈̍̇͊͝͠ď̵̛̛̯͕̭̩͖̝̙͎̊̏̈́̎͊̐̏͊̕͜͝͠͝", 11] }
]
}
})
);
expect_text(&filter, "unicode", "👯♂️🎉😍h̷̙̤̏͊̑̍̆̃̉͝ĕ̶̠̌̓̃̓̽̃̚l̸̥̊̓̓͝͠l̸̨̠̣̟̥͠ỏ̴̳̖̪̟̱̰̥̞̙̏̓́͗̽̀̈́͛͐̚̕͝͝ ̶̡͍͙͚̞͙̣̘͙̯͇̙̠̀w̷̨̨̪͚̤͙͖̝͕̜̭̯̝̋̋̿̿̀̾͛̐̏͘͘̕͝ǒ̴̙͉͈̗̖͍̘̥̤̒̈́̒͠r̶̨̡̢̦͔̙̮̦͖͔̩͈̗̖̂̀l̶̡̢͚̬̤͕̜̀͛̌̈́̈́͑͋̈̍̇͊͝͠ď̵̛̛̯͕̭̩͖̝̙͎̊̏̈́̎͊̐̏͊̕͜͝͠͝𒀇𐅣𐅤𐅥👨👨👦👦").await;
Ok(())
}
#[tokio::test]
async fn test_unicode_cursors() -> Result<()> {
pretty_env_logger::try_init().ok();
let filter = server();
let mut client = connect(&filter, "unicode").await?;
assert_eq!(client.recv().await?, json!({ "Identity": 0 }));
let mut operation = OperationSeq::default();
operation.insert("🎉🎉🎉");
let msg = json!({
"Edit": {
"revision": 0,
"operation": operation
}
});
info!("sending ClientMsg {}", msg);
client.send(&msg).await;
client.recv().await?;
let cursors = json!({
"cursors": [0, 1, 2, 3],
"selections": [[0, 1], [2, 3]]
});
client.send(&json!({ "CursorData": cursors })).await;
let cursors_resp = json!({
"UserCursor": {
"id": 0,
"data": cursors
}
});
assert_eq!(client.recv().await?, cursors_resp);
let mut client2 = connect(&filter, "unicode").await?;
assert_eq!(client2.recv().await?, json!({ "Identity": 1 }));
client2.recv().await?;
assert_eq!(client2.recv().await?, cursors_resp);
let msg = json!({
"Edit": {
"revision": 0,
"operation": ["🎉"]
}
});
client2.send(&msg).await;
let mut client3 = connect(&filter, "unicode").await?;
assert_eq!(client3.recv().await?, json!({ "Identity": 2 }));
client3.recv().await?;
let transformed_cursors_resp = json!({
"UserCursor": {
"id": 0,
"data": {
"cursors": [1, 2, 3, 4],
"selections": [[1, 2], [3, 4]]
}
}
});
assert_eq!(client3.recv().await?, transformed_cursors_resp);
Ok(())
}
| 26.991525 | 253 | 0.43752 |
f4bc40e231446e08773cc64823b5734269bfc6f2 | 995 | use std::collections::HashMap;
use util;
fn main() {
let words = get_words();
let mut num_doubles = 0;
let mut num_triples = 0;
for word in &words {
let (has_double, has_triple) = process_word(word);
if has_double {
num_doubles += 1;
}
if has_triple {
num_triples += 1;
}
}
println!("checksum is: {}", num_doubles * num_triples);
}
fn process_word(word: &String) -> (bool, bool) {
let mut freq_map: HashMap<u8, u32> = HashMap::new();
for byte in word.as_bytes() {
let count = freq_map.entry(*byte).or_insert(0);
*count += 1;
}
let mut has_double = false;
let mut has_triple = false;
for (ch, freq) in &freq_map {
if *freq == 2 {
has_double = true;
} else if *freq == 3 {
has_triple = true;
}
}
(has_double, has_triple)
}
fn get_words() -> Vec<String> {
return util::read_file_lines("input.txt");
}
| 21.170213 | 59 | 0.542714 |
e40cdc9dd32d02cb8a78edea02e255cd4a92f3e9 | 5,992 | /*
* BSD 3-Clause License
*
* Copyright (c) 2020, InterlockLedger Network
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
use super::*;
use crate::io::array::ByteArrayReader;
use crate::tags::standard::constants::IL_STRING_TAG_ID;
use crate::tags::standard::explicit::ILStringTag;
use crate::tags::{ILDefaultWithIdTagCreator, ILRawTag};
#[test]
fn test_iltag_are_equal() {
let sample = "abcde";
let a = ILStringTag::with_value(sample);
let b = ILRawTag::with_value(IL_STRING_TAG_ID, sample.as_bytes());
let c = ILRawTag::with_value(1234, sample.as_bytes());
let d = ILStringTag::with_value("abcd");
let e = ILStringTag::with_value("bcdef");
assert!(iltag_are_equal(&a, &a));
assert!(iltag_are_equal(&a, &b));
assert!(!iltag_are_equal(&a, &c));
assert!(!iltag_are_equal(&a, &d));
assert!(!iltag_are_equal(&a, &e));
}
#[test]
fn test_iltag_clone_with_factory() {
let mut factory = ILStandardTagFactory::new(true);
factory.engine().register(
1234 as u64,
Box::new(ILDefaultWithIdTagCreator::<ILStringTag>::new()),
);
let sample = "abcde";
// Cloning a string
let a = ILStringTag::with_value(sample);
let clone = match iltag_clone_with_factory(&factory, &a) {
Ok(c) => c,
_ => panic!("Clone failed!"),
};
assert!(iltag_are_equal(&a, clone.as_ref()));
// Cloning a string but with an unknown id
let a = ILStringTag::with_id_value(1234, sample);
let clone = match iltag_clone_with_factory(&factory, &a) {
Ok(c) => c,
_ => panic!("Clone failed!"),
};
// This will result in a ILRawTag instead of ILStringTag.
assert_eq!(
clone.as_ref().as_any().type_id(),
std::any::TypeId::of::<ILStringTag>()
);
assert!(iltag_are_equal(&a, clone.as_ref()));
// Cloning a string but with an unknown id
let a = ILStringTag::with_id_value(1235, sample);
match iltag_clone_with_factory(&factory, &a) {
Err(_) => (),
_ => panic!("Clone should have failed."),
};
}
#[test]
fn test_iltag_clone() {
let sample = "abcde";
// Cloning a string
let a = ILStringTag::with_value(sample);
let clone = match iltag_clone(&a) {
Ok(c) => c,
_ => panic!("Clone failed!"),
};
assert!(iltag_are_equal(&a, clone.as_ref()));
// Cloning a string but with an unknown id
let a = ILStringTag::with_id_value(1234, sample);
let clone = match iltag_clone(&a) {
Ok(c) => c,
_ => panic!("Clone failed!"),
};
// This will result in a ILRawTag instead of ILStringTag.
assert_eq!(
clone.as_ref().as_any().type_id(),
std::any::TypeId::of::<ILRawTag>()
);
assert!(iltag_are_equal(&a, clone.as_ref()));
}
#[test]
fn test_limited_reader_ensure_empty() {
let sample: [u8; 1] = [0];
let mut reader = ByteArrayReader::new(&sample);
let lreader = LimitedReader::new(&mut reader, 1);
match limited_reader_ensure_empty(&lreader, ErrorKind::TagTooLarge) {
Err(ErrorKind::TagTooLarge) => (),
_ => panic!("Error expected."),
}
match limited_reader_ensure_empty(&lreader, ErrorKind::CorruptedData) {
Err(ErrorKind::CorruptedData) => (),
_ => panic!("Error expected."),
}
let lreader = LimitedReader::new(&mut reader, 0);
match limited_reader_ensure_empty(&lreader, ErrorKind::CorruptedData) {
Ok(()) => (),
_ => panic!("Error not expected."),
}
}
//=============================================================================
// UntouchbleTagFactory
//-----------------------------------------------------------------------------
#[test]
#[should_panic(expected = "UntouchbleTagFactory touched.")]
fn test_untouchbletagfactory_create_tag() {
let f = UntouchbleTagFactory::new();
f.create_tag(0);
}
#[test]
#[should_panic(expected = "UntouchbleTagFactory touched.")]
#[allow(unused_must_use)]
fn test_untouchbletagfactory_deserialize() {
let f = UntouchbleTagFactory::new();
let empty: [u8; 0] = [];
let mut reader = ByteArrayReader::new(&empty);
f.deserialize(&mut reader);
}
#[test]
#[should_panic(expected = "UntouchbleTagFactory touched.")]
#[allow(unused_must_use)]
fn test_untouchbletagfactory_deserialize_into() {
let f = UntouchbleTagFactory::new();
let empty: [u8; 0] = [];
let mut reader = ByteArrayReader::new(&empty);
let mut tag = ILRawTag::new(1234);
f.deserialize_into(&mut reader, &mut tag);
}
| 34.045455 | 81 | 0.653204 |
0e91a84da47a9d658d8570e90a9ab75ffb654d96 | 17,846 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::TreeIter;
use crate::TreeModelFlags;
use crate::TreePath;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
#[doc(alias = "GtkTreeModel")]
pub struct TreeModel(Interface<ffi::GtkTreeModel, ffi::GtkTreeModelIface>);
match fn {
type_ => || ffi::gtk_tree_model_get_type(),
}
}
impl TreeModel {
pub const NONE: Option<&'static TreeModel> = None;
}
pub trait TreeModelExt: 'static {
#[doc(alias = "gtk_tree_model_foreach")]
fn foreach<P: FnMut(&TreeModel, &TreePath, &TreeIter) -> bool>(&self, func: P);
//#[doc(alias = "gtk_tree_model_get")]
//fn get(&self, iter: &TreeIter, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs);
#[doc(alias = "gtk_tree_model_get_column_type")]
#[doc(alias = "get_column_type")]
fn column_type(&self, index_: i32) -> glib::types::Type;
#[doc(alias = "gtk_tree_model_get_flags")]
#[doc(alias = "get_flags")]
fn flags(&self) -> TreeModelFlags;
#[doc(alias = "gtk_tree_model_get_iter")]
#[doc(alias = "get_iter")]
fn iter(&self, path: &TreePath) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_get_iter_first")]
#[doc(alias = "get_iter_first")]
fn iter_first(&self) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_get_iter_from_string")]
#[doc(alias = "get_iter_from_string")]
fn iter_from_string(&self, path_string: &str) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_get_n_columns")]
#[doc(alias = "get_n_columns")]
fn n_columns(&self) -> i32;
#[doc(alias = "gtk_tree_model_get_path")]
#[doc(alias = "get_path")]
fn path(&self, iter: &TreeIter) -> Option<TreePath>;
#[doc(alias = "gtk_tree_model_get_string_from_iter")]
#[doc(alias = "get_string_from_iter")]
fn string_from_iter(&self, iter: &TreeIter) -> Option<glib::GString>;
//#[doc(alias = "gtk_tree_model_get_valist")]
//#[doc(alias = "get_valist")]
//fn valist(&self, iter: &TreeIter, var_args: /*Unknown conversion*//*Unimplemented*/Unsupported);
#[doc(alias = "gtk_tree_model_get_value")]
#[doc(alias = "get_value")]
fn value(&self, iter: &TreeIter, column: i32) -> glib::Value;
#[doc(alias = "gtk_tree_model_iter_children")]
fn iter_children(&self, parent: Option<&TreeIter>) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_iter_has_child")]
fn iter_has_child(&self, iter: &TreeIter) -> bool;
#[doc(alias = "gtk_tree_model_iter_n_children")]
fn iter_n_children(&self, iter: Option<&TreeIter>) -> i32;
#[doc(alias = "gtk_tree_model_iter_next")]
fn iter_next(&self, iter: &TreeIter) -> bool;
#[doc(alias = "gtk_tree_model_iter_nth_child")]
fn iter_nth_child(&self, parent: Option<&TreeIter>, n: i32) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_iter_parent")]
fn iter_parent(&self, child: &TreeIter) -> Option<TreeIter>;
#[doc(alias = "gtk_tree_model_iter_previous")]
fn iter_previous(&self, iter: &TreeIter) -> bool;
#[doc(alias = "gtk_tree_model_row_changed")]
fn row_changed(&self, path: &TreePath, iter: &TreeIter);
#[doc(alias = "gtk_tree_model_row_deleted")]
fn row_deleted(&self, path: &TreePath);
#[doc(alias = "gtk_tree_model_row_has_child_toggled")]
fn row_has_child_toggled(&self, path: &TreePath, iter: &TreeIter);
#[doc(alias = "gtk_tree_model_row_inserted")]
fn row_inserted(&self, path: &TreePath, iter: &TreeIter);
#[doc(alias = "gtk_tree_model_rows_reordered_with_length")]
fn rows_reordered_with_length(
&self,
path: &TreePath,
iter: Option<&TreeIter>,
new_order: &[i32],
);
#[doc(alias = "row-changed")]
fn connect_row_changed<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId;
#[doc(alias = "row-deleted")]
fn connect_row_deleted<F: Fn(&Self, &TreePath) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "row-has-child-toggled")]
fn connect_row_has_child_toggled<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId;
#[doc(alias = "row-inserted")]
fn connect_row_inserted<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId;
//#[doc(alias = "rows-reordered")]
//fn connect_rows_reordered<Unsupported or ignored types>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<TreeModel>> TreeModelExt for O {
fn foreach<P: FnMut(&TreeModel, &TreePath, &TreeIter) -> bool>(&self, func: P) {
let func_data: P = func;
unsafe extern "C" fn func_func<P: FnMut(&TreeModel, &TreePath, &TreeIter) -> bool>(
model: *mut ffi::GtkTreeModel,
path: *mut ffi::GtkTreePath,
iter: *mut ffi::GtkTreeIter,
data: glib::ffi::gpointer,
) -> glib::ffi::gboolean {
let model = from_glib_borrow(model);
let path = from_glib_borrow(path);
let iter = from_glib_borrow(iter);
let callback: *mut P = data as *const _ as usize as *mut P;
let res = (*callback)(&model, &path, &iter);
res.into_glib()
}
let func = Some(func_func::<P> as _);
let super_callback0: &P = &func_data;
unsafe {
ffi::gtk_tree_model_foreach(
self.as_ref().to_glib_none().0,
func,
super_callback0 as *const _ as usize as *mut _,
);
}
}
//fn get(&self, iter: &TreeIter, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) {
// unsafe { TODO: call ffi:gtk_tree_model_get() }
//}
fn column_type(&self, index_: i32) -> glib::types::Type {
unsafe {
from_glib(ffi::gtk_tree_model_get_column_type(
self.as_ref().to_glib_none().0,
index_,
))
}
}
fn flags(&self) -> TreeModelFlags {
unsafe {
from_glib(ffi::gtk_tree_model_get_flags(
self.as_ref().to_glib_none().0,
))
}
}
fn iter(&self, path: &TreePath) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_get_iter(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
mut_override(path.to_glib_none().0),
));
if ret {
Some(iter)
} else {
None
}
}
}
fn iter_first(&self) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_get_iter_first(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
));
if ret {
Some(iter)
} else {
None
}
}
}
fn iter_from_string(&self, path_string: &str) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_get_iter_from_string(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
path_string.to_glib_none().0,
));
if ret {
Some(iter)
} else {
None
}
}
}
fn n_columns(&self) -> i32 {
unsafe { ffi::gtk_tree_model_get_n_columns(self.as_ref().to_glib_none().0) }
}
fn path(&self, iter: &TreeIter) -> Option<TreePath> {
unsafe {
from_glib_full(ffi::gtk_tree_model_get_path(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
))
}
}
fn string_from_iter(&self, iter: &TreeIter) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::gtk_tree_model_get_string_from_iter(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
))
}
}
//fn valist(&self, iter: &TreeIter, var_args: /*Unknown conversion*//*Unimplemented*/Unsupported) {
// unsafe { TODO: call ffi:gtk_tree_model_get_valist() }
//}
fn value(&self, iter: &TreeIter, column: i32) -> glib::Value {
unsafe {
let mut value = glib::Value::uninitialized();
ffi::gtk_tree_model_get_value(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
column,
value.to_glib_none_mut().0,
);
value
}
}
fn iter_children(&self, parent: Option<&TreeIter>) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_iter_children(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
mut_override(parent.to_glib_none().0),
));
if ret {
Some(iter)
} else {
None
}
}
}
fn iter_has_child(&self, iter: &TreeIter) -> bool {
unsafe {
from_glib(ffi::gtk_tree_model_iter_has_child(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
))
}
}
fn iter_n_children(&self, iter: Option<&TreeIter>) -> i32 {
unsafe {
ffi::gtk_tree_model_iter_n_children(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
)
}
}
fn iter_next(&self, iter: &TreeIter) -> bool {
unsafe {
from_glib(ffi::gtk_tree_model_iter_next(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
))
}
}
fn iter_nth_child(&self, parent: Option<&TreeIter>, n: i32) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_iter_nth_child(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
mut_override(parent.to_glib_none().0),
n,
));
if ret {
Some(iter)
} else {
None
}
}
}
fn iter_parent(&self, child: &TreeIter) -> Option<TreeIter> {
unsafe {
let mut iter = TreeIter::uninitialized();
let ret = from_glib(ffi::gtk_tree_model_iter_parent(
self.as_ref().to_glib_none().0,
iter.to_glib_none_mut().0,
mut_override(child.to_glib_none().0),
));
if ret {
Some(iter)
} else {
None
}
}
}
fn iter_previous(&self, iter: &TreeIter) -> bool {
unsafe {
from_glib(ffi::gtk_tree_model_iter_previous(
self.as_ref().to_glib_none().0,
mut_override(iter.to_glib_none().0),
))
}
}
fn row_changed(&self, path: &TreePath, iter: &TreeIter) {
unsafe {
ffi::gtk_tree_model_row_changed(
self.as_ref().to_glib_none().0,
mut_override(path.to_glib_none().0),
mut_override(iter.to_glib_none().0),
);
}
}
fn row_deleted(&self, path: &TreePath) {
unsafe {
ffi::gtk_tree_model_row_deleted(
self.as_ref().to_glib_none().0,
mut_override(path.to_glib_none().0),
);
}
}
fn row_has_child_toggled(&self, path: &TreePath, iter: &TreeIter) {
unsafe {
ffi::gtk_tree_model_row_has_child_toggled(
self.as_ref().to_glib_none().0,
mut_override(path.to_glib_none().0),
mut_override(iter.to_glib_none().0),
);
}
}
fn row_inserted(&self, path: &TreePath, iter: &TreeIter) {
unsafe {
ffi::gtk_tree_model_row_inserted(
self.as_ref().to_glib_none().0,
mut_override(path.to_glib_none().0),
mut_override(iter.to_glib_none().0),
);
}
}
fn rows_reordered_with_length(
&self,
path: &TreePath,
iter: Option<&TreeIter>,
new_order: &[i32],
) {
let length = new_order.len() as i32;
unsafe {
ffi::gtk_tree_model_rows_reordered_with_length(
self.as_ref().to_glib_none().0,
mut_override(path.to_glib_none().0),
mut_override(iter.to_glib_none().0),
new_order.to_glib_none().0,
length,
);
}
}
fn connect_row_changed<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn row_changed_trampoline<
P: IsA<TreeModel>,
F: Fn(&P, &TreePath, &TreeIter) + 'static,
>(
this: *mut ffi::GtkTreeModel,
path: *mut ffi::GtkTreePath,
iter: *mut ffi::GtkTreeIter,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
TreeModel::from_glib_borrow(this).unsafe_cast_ref(),
&from_glib_borrow(path),
&from_glib_borrow(iter),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"row-changed\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
row_changed_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_row_deleted<F: Fn(&Self, &TreePath) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn row_deleted_trampoline<
P: IsA<TreeModel>,
F: Fn(&P, &TreePath) + 'static,
>(
this: *mut ffi::GtkTreeModel,
path: *mut ffi::GtkTreePath,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
TreeModel::from_glib_borrow(this).unsafe_cast_ref(),
&from_glib_borrow(path),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"row-deleted\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
row_deleted_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_row_has_child_toggled<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn row_has_child_toggled_trampoline<
P: IsA<TreeModel>,
F: Fn(&P, &TreePath, &TreeIter) + 'static,
>(
this: *mut ffi::GtkTreeModel,
path: *mut ffi::GtkTreePath,
iter: *mut ffi::GtkTreeIter,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
TreeModel::from_glib_borrow(this).unsafe_cast_ref(),
&from_glib_borrow(path),
&from_glib_borrow(iter),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"row-has-child-toggled\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
row_has_child_toggled_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_row_inserted<F: Fn(&Self, &TreePath, &TreeIter) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn row_inserted_trampoline<
P: IsA<TreeModel>,
F: Fn(&P, &TreePath, &TreeIter) + 'static,
>(
this: *mut ffi::GtkTreeModel,
path: *mut ffi::GtkTreePath,
iter: *mut ffi::GtkTreeIter,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
TreeModel::from_glib_borrow(this).unsafe_cast_ref(),
&from_glib_borrow(path),
&from_glib_borrow(iter),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"row-inserted\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
row_inserted_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
//fn connect_rows_reordered<Unsupported or ignored types>(&self, f: F) -> SignalHandlerId {
// Unimplemented new_order: *.Pointer
//}
}
impl fmt::Display for TreeModel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("TreeModel")
}
}
| 32.097122 | 103 | 0.527793 |
2374a72e61b853b0da33494b4c14edb1d5c95a09 | 497 | use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
// Put the linker script somewhere the linker can find it
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
File::create(out.join("memory.x"))
.unwrap()
.write_all(include_bytes!("memory.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out.display());
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=memory.x");
} | 29.235294 | 62 | 0.62173 |
91635a9fd6416f2794fdf53f8d6d2542df1edd64 | 890 | /*
* Copyright 2018 Bitwise IO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* -----------------------------------------------------------------------------
*/
// Includes the autogenerated protobuf messages
include!(concat!(env!("OUT_DIR"), "/messages/mod.rs"));
pub use protobuf::Message;
#[cfg(feature = "old-sawtooth")]
pub use self::processor_old as processor;
| 35.6 | 80 | 0.666292 |
330cf73a855b2dcac727a1da03fc371f0ef2342f | 34,654 | //! BIP39 mnemonics
//!
//! Can be used to generate the root key of a given HDWallet,
//! an address or simply convert bits to mnemonic for human friendly
//! value.
//!
//! For more details about the protocol, see
//! [Bitcoin Improvement Proposal 39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki)
//!
//! # Example
//!
//! ## To create a new HDWallet
//!
//! ```
//! extern crate bip39;
//! extern crate rand_os;
//!
//! use bip39::*;
//! use rand_os::OsRng;
//! use rand_os::rand_core::RngCore;
//!
//! // first, you need to generate the original entropy
//! let entropy = Entropy::generate(Type::Type18Words, |bytes| OsRng.fill_bytes(bytes));
//!
//! // human readable mnemonics (in English) to retrieve the original entropy
//! // and eventually recover a HDWallet.
//! let mnemonic_phrase = entropy.to_mnemonics().to_string(&dictionary::ENGLISH);
//!
//! // The seed of the HDWallet is generated from the mnemonic string
//! // in the associated language.
//! let seed = Seed::from_mnemonic_string(&mnemonic_phrase, b"some password");
//! ```
//!
//! ## To recover a HDWallet
//!
//! ```
//! use bip39::*;
//!
//! let mnemonics = "mimic left ask vacant toast follow bitter join diamond gate attend obey";
//!
//! // to retrieve the seed, you only need the mnemonic string,
//! // here we construct the `MnemonicString` by verifying the
//! // mnemonics are valid against the given dictionary (English here).
//! let mnemonic_phrase = MnemonicString::new(&dictionary::ENGLISH, mnemonics.to_owned())
//! .expect("the given mnemonics are valid English words");
//!
//! // The seed of the HDWallet is generated from the mnemonic string
//! // in the associated language.
//! let seed = Seed::from_mnemonic_string(&mnemonic_phrase, b"some password");
//! ```
//!
use cryptoxide::hmac::Hmac;
use cryptoxide::pbkdf2::pbkdf2;
use cryptoxide::sha2::Sha512;
use std::{error, fmt, ops::Deref, result, str};
use util::{hex, securemem};
/// Error regarding BIP39 operations
#[derive(Debug, PartialEq, Eq)]
pub enum Error {
/// Received an unsupported number of mnemonic words. The parameter
/// contains the unsupported number. Supported values are
/// described as part of the [`Type`](./enum.Type.html).
WrongNumberOfWords(usize),
/// The entropy is of invalid size. The parameter contains the invalid size,
/// the list of supported entropy size are described as part of the
/// [`Type`](./enum.Type.html).
WrongKeySize(usize),
/// The given mnemonic is out of bound, i.e. its index is above 2048 and
/// is invalid within BIP39 specifications.
MnemonicOutOfBound(u16),
/// Forward error regarding dictionary operations.
LanguageError(dictionary::Error),
/// the Seed is of invalid size. The parameter is the given seed size,
/// the expected seed size is [`SEED_SIZE`](./constant.SEED_SIZE.html).
InvalidSeedSize(usize),
/// checksum is invalid. The first parameter is the expected checksum,
/// the second id the computed checksum. This error means that the given
/// mnemonics are invalid to retrieve the original entropy. The user might
/// have given an invalid mnemonic phrase.
InvalidChecksum(u8, u8)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::InvalidSeedSize(sz) => {
write!(f, "Invalid Seed Size, expected {} bytes, but received {} bytes.", SEED_SIZE, sz)
}
&Error::WrongNumberOfWords(sz) => write!(f, "Unsupported number of mnemonic words: {}", sz),
&Error::WrongKeySize(sz) => write!(f, "Unsupported mnemonic entropy size: {}", sz),
&Error::MnemonicOutOfBound(val) => write!(f, "The given mnemonic is out of bound, {}", val),
&Error::LanguageError(_) => write!(f, "Unknown mnemonic word"),
&Error::InvalidChecksum(cs1, cs2) => {
write!(f, "Invalid Entropy's Checksum, expected {:08b} but found {:08b}", cs1, cs2)
}
}
}
}
impl From<dictionary::Error> for Error {
fn from(e: dictionary::Error) -> Self {
Error::LanguageError(e)
}
}
impl error::Error for Error {
fn cause(&self) -> Option<&error::Error> {
match self {
Error::LanguageError(ref error) => Some(error),
_ => None
}
}
}
/// convenient Alias to wrap up BIP39 operations that may return
/// an [`Error`](./enum.Error.html).
pub type Result<T> = result::Result<T, Error>;
/// BIP39 entropy is used as root entropy for the HDWallet PRG
/// to generate the HDWallet root keys.
///
/// See module documentation for mode details about how to use
/// `Entropy`.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum Entropy {
Entropy9([u8; 12]),
Entropy12([u8; 16]),
Entropy15([u8; 20]),
Entropy18([u8; 24]),
Entropy21([u8; 28]),
Entropy24([u8; 32])
}
impl Entropy {
/// Retrieve an `Entropy` from the given slice.
///
/// # Error
///
/// This function may fail if the given slice's length is not
/// one of the supported entropy length. See [`Type`](./enum.Type.html)
/// for the list of supported entropy sizes.
///
pub fn from_slice(bytes: &[u8]) -> Result<Self> {
let t = Type::from_entropy_size(bytes.len() * 8)?;
Ok(Self::new(t, bytes))
}
/// generate entropy using the given random generator.
///
/// # Example
///
/// ```
/// extern crate rand_os;
/// extern crate bip39;
///
/// use rand_os::OsRng;
/// use rand_os::rand_core::RngCore;
///
/// use bip39::*;
///
/// let entropy = Entropy::generate(Type::Type15Words, |bytes| OsRng.fill_bytes(bytes));
/// ```
///
pub fn generate<G>(t: Type, gen: G) -> Self
where
G: Fn(&mut [u8]) -> ()
{
let bytes = [0u8; 32];
let mut entropy = Self::new(t, &bytes[..]);
gen(entropy.as_mut());
entropy
}
fn new(t: Type, bytes: &[u8]) -> Self {
let mut e = match t {
Type::Type9Words => Entropy::Entropy9([0u8; 12]),
Type::Type12Words => Entropy::Entropy12([0u8; 16]),
Type::Type15Words => Entropy::Entropy15([0u8; 20]),
Type::Type18Words => Entropy::Entropy18([0u8; 24]),
Type::Type21Words => Entropy::Entropy21([0u8; 28]),
Type::Type24Words => Entropy::Entropy24([0u8; 32])
};
for i in 0..e.as_ref().len() {
e.as_mut()[i] = bytes[i]
}
e
}
/// handy helper to retrieve the [`Type`](./enum.Type.html)
/// from the `Entropy`.
#[inline]
pub fn get_type(&self) -> Type {
match self {
&Entropy::Entropy9(_) => Type::Type9Words,
&Entropy::Entropy12(_) => Type::Type12Words,
&Entropy::Entropy15(_) => Type::Type15Words,
&Entropy::Entropy18(_) => Type::Type18Words,
&Entropy::Entropy21(_) => Type::Type21Words,
&Entropy::Entropy24(_) => Type::Type24Words
}
}
fn as_mut(&mut self) -> &mut [u8] {
match self {
&mut Entropy::Entropy9(ref mut b) => b.as_mut(),
&mut Entropy::Entropy12(ref mut b) => b.as_mut(),
&mut Entropy::Entropy15(ref mut b) => b.as_mut(),
&mut Entropy::Entropy18(ref mut b) => b.as_mut(),
&mut Entropy::Entropy21(ref mut b) => b.as_mut(),
&mut Entropy::Entropy24(ref mut b) => b.as_mut()
}
}
fn hash(&self) -> [u8; 32] {
use cryptoxide::digest::Digest;
use cryptoxide::sha2::Sha256;
let mut hasher = Sha256::new();
let mut res = [0u8; 32];
hasher.input(self.as_ref());
hasher.result(&mut res);
res
}
/// compute the checksum of the entropy, be aware that only
/// part of the bytes may be useful for the checksum depending
/// of the [`Type`](./enum.Type.html) of the `Entropy`.
///
/// | entropy type | checksum size (in bits) |
/// | ------------ | ----------------------- |
/// | 9 words | 3 bits |
/// | 12 words | 4 bits |
/// | 15 words | 5 bits |
/// | 18 words | 6 bits |
/// | 21 words | 7 bits |
/// | 24 words | 8 bits |
///
/// # Example
///
/// ```
/// extern crate rand_os;
/// extern crate bip39;
///
/// use rand_os::OsRng;
/// use rand_os::rand_core::RngCore;
///
/// use bip39::*;
///
/// let entropy = Entropy::generate(Type::Type15Words, |bytes| OsRng.fill_bytes(bytes));
///
/// let checksum = entropy.checksum() & 0b0001_1111;
/// ```
///
pub fn checksum(&self) -> u8 {
let hash = self.hash()[0];
match self.get_type() {
Type::Type9Words => (hash >> 5) & 0b0000_0111,
Type::Type12Words => (hash >> 4) & 0b0000_1111,
Type::Type15Words => (hash >> 3) & 0b0001_1111,
Type::Type18Words => (hash >> 2) & 0b0011_1111,
Type::Type21Words => (hash >> 1) & 0b0111_1111,
Type::Type24Words => hash
}
}
/// retrieve the `Entropy` from the given [`Mnemonics`](./struct.Mnemonics.html).
///
/// # Example
///
/// ```
/// # use bip39::*;
///
/// const MNEMONICS : &'static str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
/// let mnemonics = Mnemonics::from_string(&dictionary::ENGLISH, MNEMONICS)
/// .expect("validating the given mnemonics phrase");
///
/// let entropy = Entropy::from_mnemonics(&mnemonics)
/// .expect("retrieving the entropy from the mnemonics");
/// ```
///
/// # Error
///
/// This function may fail if the Mnemonic has an invalid checksum. As part of the
/// BIP39, the checksum must be embedded in the mnemonic phrase. This allow to check
/// the mnemonics have been correctly entered by the user.
///
pub fn from_mnemonics(mnemonics: &Mnemonics) -> Result<Self> {
use util::bits::BitWriterBy11;
let t = mnemonics.get_type();
let mut to_validate = BitWriterBy11::new();
for mnemonic in mnemonics.0.iter() {
to_validate.write(mnemonic.0);
}
let mut r = to_validate.to_bytes();
let entropy_bytes = Vec::from(&r[..t.to_key_size() / 8]);
let entropy = Self::new(t, &entropy_bytes[..]);
if let Some(h) = r.pop() {
let h2 = h >> (8 - t.checksum_size_bits());
let cs = entropy.checksum();
if cs != h2 {
return Err(Error::InvalidChecksum(cs, h2));
}
};
Ok(entropy)
}
/// convert the given `Entropy` into a mnemonic phrase.
///
/// # Example
///
/// ```
/// # use bip39::*;
///
/// let entropy = Entropy::Entropy12([0;16]);
///
/// let mnemonics = entropy.to_mnemonics()
/// .to_string(&dictionary::ENGLISH);
/// ```
///
pub fn to_mnemonics(&self) -> Mnemonics {
use util::bits::BitReaderBy11;
let t = self.get_type();
let mut combined = Vec::from(self.as_ref());
combined.extend(&self.hash()[..]);
let mut reader = BitReaderBy11::new(&combined);
let mut words: Vec<MnemonicIndex> = Vec::new();
for _ in 0..t.mnemonic_count() {
// here we are confident the entropy has already
// enough bytes to read all the bits we need.
let n = reader.read();
// assert only in non optimized builds, Since we read 11bits
// by 11 bits we should not allow values beyond 2047.
debug_assert!(
n <= MAX_MNEMONIC_VALUE,
"Something went wrong, the BitReaderBy11 did return an impossible value: {} (0b{:016b})",
n,
n
);
// here we can unwrap safely as 11bits can
// only store up to the value 2047
words.push(MnemonicIndex::new(n).unwrap());
}
// by design, it is safe to call unwrap here as
// the mnemonic length has been validated by construction.
Mnemonics::from_mnemonics(words).unwrap()
}
}
impl fmt::Display for Entropy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", hex::encode(self.as_ref()))
}
}
impl fmt::Debug for Entropy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", hex::encode(self.as_ref()))
}
}
impl AsRef<[u8]> for Entropy {
fn as_ref(&self) -> &[u8] {
match self {
&Entropy::Entropy9(ref b) => b.as_ref(),
&Entropy::Entropy12(ref b) => b.as_ref(),
&Entropy::Entropy15(ref b) => b.as_ref(),
&Entropy::Entropy18(ref b) => b.as_ref(),
&Entropy::Entropy21(ref b) => b.as_ref(),
&Entropy::Entropy24(ref b) => b.as_ref()
}
}
}
impl Deref for Entropy {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl Drop for Entropy {
fn drop(&mut self) {
match self {
Entropy::Entropy9(b) => securemem::zero(b),
Entropy::Entropy12(b) => securemem::zero(b),
Entropy::Entropy15(b) => securemem::zero(b),
Entropy::Entropy18(b) => securemem::zero(b),
Entropy::Entropy21(b) => securemem::zero(b),
Entropy::Entropy24(b) => securemem::zero(b)
}
}
}
/// the expected size of a seed, in bytes.
pub const SEED_SIZE: usize = 64;
/// A BIP39 `Seed` object, will be used to generate a given HDWallet
/// root key.
///
/// See the module documentation for more details about how to use it
/// within the `keychain` library.
pub struct Seed([u8; SEED_SIZE]);
impl Seed {
/// create a Seed by taking ownership of the given array
///
/// # Example
///
/// ```
/// use bip39::{Seed, SEED_SIZE};
///
/// let bytes = [0u8;SEED_SIZE];
/// let seed = Seed::from_bytes(bytes);
///
/// assert!(seed.as_ref().len() == SEED_SIZE);
/// ```
pub fn from_bytes(buf: [u8; SEED_SIZE]) -> Self {
Seed(buf)
}
/// create a Seed by copying the given slice into a new array
///
/// # Example
///
/// ```
/// use bip39::{Seed, SEED_SIZE};
///
/// let bytes = [0u8;SEED_SIZE];
/// let wrong = [0u8;31];
///
/// assert!(Seed::from_slice(&wrong[..]).is_err());
/// assert!(Seed::from_slice(&bytes[..]).is_ok());
/// ```
///
/// # Error
///
/// This constructor may fail if the given slice's length is not
/// compatible to define a `Seed` (see [`SEED_SIZE`](./constant.SEED_SIZE.html)).
///
pub fn from_slice(buf: &[u8]) -> Result<Self> {
if buf.len() != SEED_SIZE {
return Err(Error::InvalidSeedSize(buf.len()));
}
let mut v = [0u8; SEED_SIZE];
v[..].clone_from_slice(buf);
Ok(Seed::from_bytes(v))
}
/// get the seed from the given [`MnemonicString`] and the given password.
///
/// [`MnemonicString`]: ./struct.MnemonicString.html
///
/// Note that the `Seed` is not generated from the `Entropy` directly. It is a
/// design choice of Bip39.
///
/// # Safety
///
/// The password is meant to allow plausible deniability. While it is possible
/// not to use a password to protect the HDWallet it is better to add one.
///
/// # Example
///
/// ```
/// # use bip39::*;
///
/// const MNEMONICS : &'static str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
/// let mnemonics = MnemonicString::new(&dictionary::ENGLISH, MNEMONICS.to_owned())
/// .expect("valid Mnemonic phrase");
///
/// let seed = Seed::from_mnemonic_string(&mnemonics, b"Bourbaki team rocks!");
/// ```
///
pub fn from_mnemonic_string(mnemonics: &MnemonicString, password: &[u8]) -> Self {
let mut salt = Vec::from("mnemonic".as_bytes());
salt.extend_from_slice(password);
let mut mac = Hmac::new(Sha512::new(), mnemonics.0.as_bytes());
let mut result = [0; SEED_SIZE];
pbkdf2(&mut mac, &salt, 2048, &mut result);
Self::from_bytes(result)
}
}
impl PartialEq for Seed {
fn eq(&self, other: &Self) -> bool {
self.0.as_ref() == other.0.as_ref()
}
}
impl fmt::Debug for Seed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", hex::encode(self.as_ref()))
}
}
impl fmt::Display for Seed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", hex::encode(self.as_ref()))
}
}
impl AsRef<[u8]> for Seed {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl Deref for Seed {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl Drop for Seed {
fn drop(&mut self) {
self.0.copy_from_slice(&[0; SEED_SIZE][..]);
}
}
/// RAII for validated mnemonic words. This guarantee a given mnemonic phrase
/// has been safely validated against a dictionary.
///
/// See the module documentation for more details about how to use it
/// within the `keychain` library.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[cfg_attr(feature = "generic-serialization", derive(Serialize, Deserialize))]
pub struct MnemonicString(String);
impl MnemonicString {
/// create a `MnemonicString` from the given `String`. This function
/// will validate the mnemonic phrase against the given [`Language`]
///
/// [`Language`]: ./dictionary/trait.Language.html
///
/// # Example
///
/// ```
/// # use bip39::*;
///
/// const MNEMONICS : &'static str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
/// let mnemonics = MnemonicString::new(&dictionary::ENGLISH, MNEMONICS.to_owned())
/// .expect("valid Mnemonic phrase");
/// ```
///
/// # Error
///
/// This function may fail if one or all words are not recognized
/// in the given [`Language`].
///
pub fn new<D>(dic: &D, s: String) -> Result<Self>
where
D: dictionary::Language
{
let _ = Mnemonics::from_string(dic, &s)?;
Ok(MnemonicString(s))
}
}
impl Deref for MnemonicString {
type Target = str;
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl fmt::Display for MnemonicString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// The support type of `Mnemonics`, i.e. the number of words supported in a
/// mnemonic phrase.
///
/// This enum provide the following properties:
///
/// | number of words | entropy size (bits) | checksum size (bits) |
/// | --------------- | ------------------- | --------------------- |
/// | 9 | 96 | 3 |
/// | 12 | 128 | 4 |
/// | 15 | 160 | 5 |
/// | 18 | 192 | 6 |
/// | 21 | 224 | 7 |
/// | 24 | 256 | 8 |
///
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
#[cfg_attr(feature = "generic-serialization", derive(Serialize, Deserialize))]
pub enum Type {
Type9Words,
Type12Words,
Type15Words,
Type18Words,
Type21Words,
Type24Words
}
impl Type {
pub fn from_word_count(len: usize) -> Result<Self> {
match len {
9 => Ok(Type::Type9Words),
12 => Ok(Type::Type12Words),
15 => Ok(Type::Type15Words),
18 => Ok(Type::Type18Words),
21 => Ok(Type::Type21Words),
24 => Ok(Type::Type24Words),
_ => Err(Error::WrongNumberOfWords(len))
}
}
pub fn from_entropy_size(len: usize) -> Result<Self> {
match len {
96 => Ok(Type::Type9Words),
128 => Ok(Type::Type12Words),
160 => Ok(Type::Type15Words),
192 => Ok(Type::Type18Words),
224 => Ok(Type::Type21Words),
256 => Ok(Type::Type24Words),
_ => Err(Error::WrongKeySize(len))
}
}
pub fn to_key_size(&self) -> usize {
match self {
&Type::Type9Words => 96,
&Type::Type12Words => 128,
&Type::Type15Words => 160,
&Type::Type18Words => 192,
&Type::Type21Words => 224,
&Type::Type24Words => 256
}
}
pub fn checksum_size_bits(&self) -> usize {
match self {
&Type::Type9Words => 3,
&Type::Type12Words => 4,
&Type::Type15Words => 5,
&Type::Type18Words => 6,
&Type::Type21Words => 7,
&Type::Type24Words => 8
}
}
pub fn mnemonic_count(&self) -> usize {
match self {
&Type::Type9Words => 9,
&Type::Type12Words => 12,
&Type::Type15Words => 15,
&Type::Type18Words => 18,
&Type::Type21Words => 21,
&Type::Type24Words => 24
}
}
}
impl Default for Type {
fn default() -> Type {
Type::Type18Words
}
}
impl fmt::Display for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Type::Type9Words => write!(f, "9"),
&Type::Type12Words => write!(f, "12"),
&Type::Type15Words => write!(f, "15"),
&Type::Type18Words => write!(f, "18"),
&Type::Type21Words => write!(f, "21"),
&Type::Type24Words => write!(f, "24")
}
}
}
impl str::FromStr for Type {
type Err = &'static str;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
match s {
"9" => Ok(Type::Type9Words),
"12" => Ok(Type::Type12Words),
"15" => Ok(Type::Type15Words),
"18" => Ok(Type::Type18Words),
"21" => Ok(Type::Type21Words),
"24" => Ok(Type::Type24Words),
_ => Err("Unknown bip39 mnemonic size")
}
}
}
/// the maximum authorized value for a mnemonic. i.e. 2047
pub const MAX_MNEMONIC_VALUE: u16 = 2047;
/// Safe representation of a valid mnemonic index (see
/// [`MAX_MNEMONIC_VALUE`](./constant.MAX_MNEMONIC_VALUE.html)).
///
/// See [`dictionary module documentation`](./dictionary/index.html) for
/// more details about how to use this.
///
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub struct MnemonicIndex(pub u16);
impl MnemonicIndex {
/// smart constructor, validate the given value fits the mnemonic index
/// boundaries (see [`MAX_MNEMONIC_VALUE`](./constant.MAX_MNEMONIC_VALUE.html)).
///
/// # Example
///
/// ```
/// # use bip39::*;
/// #
/// let index = MnemonicIndex::new(1029);
/// assert!(index.is_ok());
/// // this line will fail
/// let index = MnemonicIndex::new(4029);
/// assert_eq!(index, Err(Error::MnemonicOutOfBound(4029)));
/// ```
///
/// # Error
///
/// returns an [`Error::MnemonicOutOfBound`](enum.Error.html#variant.MnemonicOutOfBound)
/// if the given value does not fit the valid values.
///
pub fn new(m: u16) -> Result<Self> {
if m <= MAX_MNEMONIC_VALUE {
Ok(MnemonicIndex(m))
} else {
Err(Error::MnemonicOutOfBound(m))
}
}
/// lookup in the given dictionary to retrieve the mnemonic word.
///
/// # panic
///
/// this function may panic if the
/// [`Language::lookup_word`](./dictionary/trait.Language.html#method.lookup_word)
/// returns an error. Which should not happen.
///
pub fn to_word<D>(self, dic: &D) -> String
where
D: dictionary::Language
{
dic.lookup_word(self).unwrap()
}
/// retrieve the Mnemonic index from the given word in the
/// given dictionary.
///
/// # Error
///
/// May fail with a [`LanguageError`](enum.Error.html#variant.LanguageError)
/// if the given [`Language`](./dictionary/trait.Language.html) returns the
/// given word is not within its dictionary.
///
pub fn from_word<D>(dic: &D, word: &str) -> Result<Self>
where
D: dictionary::Language
{
let v = dic.lookup_mnemonic(word)?;
Ok(v)
}
}
/// Language agnostic mnemonic phrase representation.
///
/// This is an handy intermediate representation of a given mnemonic
/// phrase. One can use this intermediate representation to translate
/// mnemonic from one [`Language`](./dictionary/trait.Language.html)
/// to another. **However** keep in mind that the [`Seed`](./struct.Seed.html)
/// is linked to the mnemonic string in a specific language, in a specific
/// dictionary. The [`Entropy`](./struct.Entropy.html) will be the same
/// but the resulted [`Seed`](./struct.Seed.html) will differ and all
/// the derived key of a HDWallet using the [`Seed`](./struct.Seed.html)
/// as a source to generate the root key.
///
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Mnemonics(Vec<MnemonicIndex>);
impl AsRef<[MnemonicIndex]> for Mnemonics {
fn as_ref(&self) -> &[MnemonicIndex] {
&self.0[..]
}
}
impl Mnemonics {
/// get the [`Type`](./enum.Type.html) of this given `Mnemonics`.
///
/// # panic
///
/// the only case this function may panic is if the `Mnemonics` has
/// been badly constructed (i.e. not from one of the given smart
/// constructor).
///
pub fn get_type(&self) -> Type {
Type::from_word_count(self.0.len()).unwrap()
}
/// get the mnemonic string representation in the given
/// [`Language`](./dictionary/trait.Language.html).
///
pub fn to_string<D>(&self, dic: &D) -> MnemonicString
where
D: dictionary::Language
{
let mut vec = String::new();
let mut first = true;
for m in self.0.iter() {
if first {
first = false;
} else {
vec.push_str(dic.separator());
}
vec.push_str(&m.to_word(dic))
}
MnemonicString(vec)
}
/// Construct the `Mnemonics` from its string representation in the given
/// [`Language`](./dictionary/trait.Language.html).
///
/// # Error
///
/// May fail with a [`LanguageError`](enum.Error.html#variant.LanguageError)
/// if the given [`Language`](./dictionary/trait.Language.html) returns the
/// given word is not within its dictionary.
///
pub fn from_string<D>(dic: &D, mnemonics: &str) -> Result<Self>
where
D: dictionary::Language
{
let mut vec = vec![];
for word in mnemonics.split(dic.separator()) {
vec.push(MnemonicIndex::from_word(dic, word)?);
}
Mnemonics::from_mnemonics(vec)
}
/// Construct the `Mnemonics` from the given array of `MnemonicIndex`.
///
/// # Error
///
/// May fail if this is an invalid number of `MnemonicIndex`.
///
pub fn from_mnemonics(mnemonics: Vec<MnemonicIndex>) -> Result<Self> {
let _ = Type::from_word_count(mnemonics.len())?;
Ok(Mnemonics(mnemonics))
}
}
pub mod dictionary {
//! Language support for BIP39 implementations.
//!
//! We provide default dictionaries for the some common languages.
//! This interface is exposed to allow users to implement custom
//! dictionaries.
//!
//! Because this module is part of the `keychain` crate and that we
//! need to keep the dependencies as small as possible we do not support
//! UTF8 NFKD by default. Users must be sure to compose (or decompose)
//! our output (or input) UTF8 strings.
//!
use std::{error, fmt, result};
use super::MnemonicIndex;
/// Errors associated to a given language/dictionary
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[cfg_attr(feature = "generic-serialization", derive(Serialize, Deserialize))]
pub enum Error {
/// this means the given word is not in the Dictionary of the Language.
MnemonicWordNotFoundInDictionary(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::MnemonicWordNotFoundInDictionary(ref s) => {
write!(f, "Mnemonic word not found in dictionary \"{}\"", s)
}
}
}
}
impl error::Error for Error {}
/// wrapper for `dictionary` operations that may return an error
pub type Result<T> = result::Result<T, Error>;
/// trait to represent the the properties that needs to be associated to
/// a given language and its dictionary of known mnemonic words.
///
pub trait Language {
fn name(&self) -> &'static str;
fn separator(&self) -> &'static str;
fn lookup_mnemonic(&self, word: &str) -> Result<MnemonicIndex>;
fn lookup_word(&self, mnemonic: MnemonicIndex) -> Result<String>;
}
/// Default Dictionary basic support for the different main languages.
/// This dictionary expect the inputs to have been normalized (UTF-8 NFKD).
///
/// If you wish to implement support for non pre-normalized form you can
/// create reuse this dictionary in a custom struct and implement support
/// for [`Language`](./trait.Language.html) accordingly (_hint_: use
/// [`unicode-normalization`](https://crates.io/crates/unicode-normalization)).
///
pub struct DefaultDictionary {
pub words: [&'static str; 2048],
pub name: &'static str
}
impl Language for DefaultDictionary {
fn name(&self) -> &'static str {
self.name
}
fn separator(&self) -> &'static str {
" "
}
fn lookup_mnemonic(&self, word: &str) -> Result<MnemonicIndex> {
match self.words.iter().position(|x| x == &word) {
None => Err(Error::MnemonicWordNotFoundInDictionary(word.to_string())),
Some(v) => {
Ok(
// it is safe to call unwrap as we guarantee that the
// returned index `v` won't be out of bound for a
// `MnemonicIndex` (DefaultDictionary.words is an array of 2048 elements)
MnemonicIndex::new(v as u16).unwrap()
)
}
}
}
fn lookup_word(&self, mnemonic: MnemonicIndex) -> Result<String> {
Ok(unsafe { self.words.get_unchecked(mnemonic.0 as usize) }).map(|s| String::from(*s))
}
}
/// default English dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#wordlists)
///
pub const ENGLISH: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_english.txt"), name: "english" };
/// default French dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#french)
///
pub const FRENCH: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_french.txt"), name: "french" };
/// default Japanese dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#japanese)
///
pub const JAPANESE: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_japanese.txt"), name: "japanese" };
/// default Korean dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#japanese)
///
pub const KOREAN: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_korean.txt"), name: "korean" };
/// default chinese simplified dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#chinese)
///
pub const CHINESE_SIMPLIFIED: DefaultDictionary = DefaultDictionary {
words: include!("dicts/bip39_chinese_simplified.txt"),
name: "chinese-simplified"
};
/// default chinese traditional dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#chinese)
///
pub const CHINESE_TRADITIONAL: DefaultDictionary = DefaultDictionary {
words: include!("dicts/bip39_chinese_traditional.txt"),
name: "chinese-traditional"
};
/// default italian dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#italian)
///
pub const ITALIAN: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_italian.txt"), name: "italian" };
/// default spanish dictionary as provided by the
/// [BIP39 standard](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md#spanish)
///
pub const SPANISH: DefaultDictionary =
DefaultDictionary { words: include!("dicts/bip39_spanish.txt"), name: "spanish" };
}
#[cfg(test)]
mod test {
use super::*;
use rand_os::rand_core::RngCore;
use rand_os::OsRng;
extern crate unicode_normalization;
use self::unicode_normalization::UnicodeNormalization;
use self::dictionary::Language;
#[test]
fn english_dic() {
let dic = &dictionary::ENGLISH;
assert_eq!(dic.lookup_mnemonic("abandon"), Ok(MnemonicIndex(0)));
assert_eq!(dic.lookup_mnemonic("crack"), Ok(MnemonicIndex(398)));
assert_eq!(dic.lookup_mnemonic("shell"), Ok(MnemonicIndex(1579)));
assert_eq!(dic.lookup_mnemonic("zoo"), Ok(MnemonicIndex(2047)));
assert_eq!(dic.lookup_word(MnemonicIndex(0)), Ok("abandon".to_string()));
assert_eq!(dic.lookup_word(MnemonicIndex(398)), Ok("crack".to_string()));
assert_eq!(dic.lookup_word(MnemonicIndex(1579)), Ok("shell".to_string()));
assert_eq!(dic.lookup_word(MnemonicIndex(2047)), Ok("zoo".to_string()));
}
#[test]
fn mnemonic_zero() {
let entropy = Entropy::Entropy12([0; 16]);
let mnemonics = entropy.to_mnemonics();
let entropy2 = Entropy::from_mnemonics(&mnemonics).unwrap();
assert_eq!(entropy, entropy2);
}
#[test]
fn mnemonic_7f() {
let entropy = Entropy::Entropy12([0x7f; 16]);
let mnemonics = entropy.to_mnemonics();
let entropy2 = Entropy::from_mnemonics(&mnemonics).unwrap();
assert_eq!(entropy, entropy2);
}
#[test]
fn from_mnemonic_to_mnemonic() {
let entropy = Entropy::generate(Type::Type12Words, |bytes| OsRng.fill_bytes(bytes));
let mnemonics = entropy.to_mnemonics();
let entropy2 = Entropy::from_mnemonics(&mnemonics).unwrap();
assert_eq!(entropy, entropy2);
}
#[derive(Debug)]
struct TestVector {
entropy: &'static str,
mnemonics: &'static str,
seed: &'static str,
passphrase: &'static str
}
fn mk_test<D: dictionary::Language>(test: &TestVector, dic: &D) {
// decompose the UTF8 inputs before processing:
let mnemonics: String = test.mnemonics.nfkd().collect();
let passphrase: String = test.passphrase.nfkd().collect();
let mnemonics_ref = Mnemonics::from_string(dic, &mnemonics).expect("valid mnemonics");
let mnemonics_str = MnemonicString::new(dic, mnemonics).expect("valid mnemonics string");
let entropy_ref =
Entropy::from_slice(&hex::decode(test.entropy).unwrap()).expect("decode entropy from hex");
let seed_ref =
Seed::from_slice(&hex::decode(test.seed).unwrap()).expect("decode seed from hex");
assert!(mnemonics_ref.get_type() == entropy_ref.get_type());
assert!(entropy_ref.to_mnemonics() == mnemonics_ref);
assert!(
entropy_ref
== Entropy::from_mnemonics(&mnemonics_ref).expect("retrieve entropy from mnemonics")
);
assert_eq!(seed_ref, Seed::from_mnemonic_string(&mnemonics_str, passphrase.as_bytes()));
}
fn mk_tests<D: dictionary::Language>(tests: &[TestVector], dic: &D) {
for test in tests {
mk_test(test, dic);
}
}
#[test]
fn test_vectors_english() {
mk_tests(TEST_VECTORS_ENGLISH, &dictionary::ENGLISH)
}
#[test]
fn test_vectors_japanese() {
mk_tests(TEST_VECTORS_JAPANESE, &dictionary::JAPANESE)
}
const TEST_VECTORS_ENGLISH: &'static [TestVector] = &include!("test_vectors/bip39_english.txt");
const TEST_VECTORS_JAPANESE: &'static [TestVector] = &include!("test_vectors/bip39_japanese.txt");
}
| 32.116775 | 135 | 0.623218 |
1ae8e067cf4735b92a306b993dffc79ad1967616 | 683 | #[macro_use]
extern crate diesel;
use diesel::*;
table! {
users {
id -> Integer,
name -> VarChar,
}
}
table! {
posts {
id -> Integer,
title -> VarChar,
user_id -> Integer,
}
}
fn main() {
let stuff = users::table.select((posts::id, posts::user_id));
//~^ ERROR Selectable
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
let stuff = users::table.select((posts::id, users::name));
//~^ ERROR Selectable
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
//~| ERROR E0277
}
| 17.512821 | 65 | 0.510981 |
ac3c2ec3360fc87a6ec18f3a5179cf73a3ad11fb | 85,281 | use std::borrow::Cow;
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use bigdecimal::BigDecimal;
use indexmap::IndexMap;
use log::trace;
use nu_errors::{ArgumentError, ParseError};
use nu_path::{expand_path, expand_path_string};
use nu_protocol::hir::{
self, Binary, Block, Call, ClassifiedCommand, Expression, ExternalRedirection, Flag, FlagKind,
Group, InternalCommand, Member, NamedArguments, Operator, Pipeline, RangeOperator,
SpannedExpression, Synthetic, Unit,
};
use nu_protocol::{NamedType, PositionalType, Signature, SyntaxShape, UnspannedPathMember};
use nu_source::{HasSpan, Span, Spanned, SpannedItem};
use num_bigint::BigInt;
use crate::{lex::lexer::NewlineMode, parse::def::parse_parameter};
use crate::{
lex::lexer::{lex, parse_block},
ParserScope,
};
use crate::{
lex::{
lexer::Token,
tokens::{LiteBlock, LiteCommand, LitePipeline, TokenContents},
},
parse::def::lex_split_baseline_tokens_on,
};
use self::{
def::{parse_definition, parse_definition_prototype},
util::trim_quotes,
util::verify_and_strip,
};
mod def;
mod util;
pub use self::util::garbage;
/// Parses a simple column path, one without a variable (implied or explicit) at the head
pub fn parse_simple_column_path(
lite_arg: &Spanned<String>,
) -> (SpannedExpression, Option<ParseError>) {
let mut delimiter = '.';
let mut inside_delimiter = false;
let mut output = vec![];
let mut current_part = String::new();
let mut start_index = 0;
let mut last_index = 0;
for (idx, c) in lite_arg.item.char_indices() {
last_index = idx;
if inside_delimiter {
if c == delimiter {
inside_delimiter = false;
}
} else if c == '\'' || c == '"' {
inside_delimiter = true;
delimiter = c;
} else if c == '.' {
let part_span = Span::new(
lite_arg.span.start() + start_index,
lite_arg.span.start() + idx,
);
if let Ok(row_number) = current_part.parse::<i64>() {
output.push(Member::Int(row_number, part_span));
} else {
let trimmed = trim_quotes(¤t_part);
output.push(Member::Bare(trimmed.spanned(part_span)));
}
current_part.clear();
// Note: I believe this is safe because of the delimiter we're using,
// but if we get fancy with Unicode we'll need to change this.
start_index = idx + '.'.len_utf8();
continue;
}
current_part.push(c);
}
if !current_part.is_empty() {
let part_span = Span::new(
lite_arg.span.start() + start_index,
lite_arg.span.start() + last_index + 1,
);
if let Ok(row_number) = current_part.parse::<i64>() {
output.push(Member::Int(row_number, part_span));
} else {
let current_part = trim_quotes(¤t_part);
output.push(Member::Bare(current_part.spanned(part_span)));
}
}
(
SpannedExpression::new(Expression::simple_column_path(output), lite_arg.span),
None,
)
}
/// Parses a column path, adding in the preceding reference to $it if it's elided
pub fn parse_full_column_path(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
let mut inside_delimiter = vec![];
let mut output = vec![];
let mut current_part = String::new();
let mut start_index = 0;
let mut last_index = 0;
let mut error = None;
let mut head = None;
for (idx, c) in lite_arg.item.char_indices() {
last_index = idx;
if c == '(' {
inside_delimiter.push(')');
} else if let Some(delimiter) = inside_delimiter.last() {
if c == *delimiter {
inside_delimiter.pop();
}
} else if c == '\'' || c == '"' {
inside_delimiter.push(c);
} else if c == '.' {
let part_span = Span::new(
lite_arg.span.start() + start_index,
lite_arg.span.start() + idx,
);
if head.is_none() && current_part.starts_with('(') && current_part.ends_with(')') {
let (invoc, err) =
parse_subexpression(¤t_part.clone().spanned(part_span), scope);
if error.is_none() {
error = err;
}
head = Some(invoc.expr);
} else if head.is_none() && current_part.starts_with('$') {
// We have the variable head
head = Some(Expression::variable(current_part.clone(), part_span))
} else if let Ok(row_number) = current_part.parse::<i64>() {
output.push(UnspannedPathMember::Int(row_number).into_path_member(part_span));
} else {
let current_part = trim_quotes(¤t_part);
output.push(
UnspannedPathMember::String(current_part.clone()).into_path_member(part_span),
);
}
current_part.clear();
// Note: I believe this is safe because of the delimiter we're using,
// but if we get fancy with Unicode we'll need to change this.
start_index = idx + '.'.len_utf8();
continue;
}
current_part.push(c);
}
if !current_part.is_empty() {
let part_span = Span::new(
lite_arg.span.start() + start_index,
lite_arg.span.start() + last_index + 1,
);
if head.is_none() {
if current_part.starts_with('(') && current_part.ends_with(')') {
let (invoc, err) = parse_subexpression(¤t_part.spanned(part_span), scope);
if error.is_none() {
error = err;
}
head = Some(invoc.expr);
} else if current_part.starts_with('$') {
head = Some(Expression::variable(current_part, lite_arg.span));
} else if let Ok(row_number) = current_part.parse::<i64>() {
output.push(UnspannedPathMember::Int(row_number).into_path_member(part_span));
} else {
let current_part = trim_quotes(¤t_part);
output.push(UnspannedPathMember::String(current_part).into_path_member(part_span));
}
} else if let Ok(row_number) = current_part.parse::<i64>() {
output.push(UnspannedPathMember::Int(row_number).into_path_member(part_span));
} else {
let current_part = trim_quotes(¤t_part);
output.push(UnspannedPathMember::String(current_part).into_path_member(part_span));
}
}
if let Some(head) = head {
(
SpannedExpression::new(
Expression::path(SpannedExpression::new(head, lite_arg.span), output),
lite_arg.span,
),
error,
)
} else {
(
SpannedExpression::new(
Expression::path(
SpannedExpression::new(
Expression::variable("$it".into(), lite_arg.span),
lite_arg.span,
),
output,
),
lite_arg.span,
),
error,
)
}
}
/// Parse a numeric range
fn parse_range(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
let lite_arg_span_start = lite_arg.span.start();
let lite_arg_len = lite_arg.item.len();
let (dotdot_pos, operator_str, operator) = if let Some(pos) = lite_arg.item.find("..<") {
(pos, "..<", RangeOperator::RightExclusive)
} else if let Some(pos) = lite_arg.item.find("..") {
(pos, "..", RangeOperator::Inclusive)
} else {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("range", lite_arg.clone())),
);
};
if lite_arg.item[0..dotdot_pos].is_empty()
&& lite_arg.item[(dotdot_pos + operator_str.len())..].is_empty()
{
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("range", lite_arg.clone())),
);
}
let numbers: Vec<_> = lite_arg.item.split(operator_str).collect();
if numbers.len() != 2 {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("range", lite_arg.clone())),
);
}
let right_number_offset = operator_str.len();
let lhs = numbers[0].to_string().spanned(Span::new(
lite_arg_span_start,
lite_arg_span_start + dotdot_pos,
));
let rhs = numbers[1].to_string().spanned(Span::new(
lite_arg_span_start + dotdot_pos + right_number_offset,
lite_arg_span_start + lite_arg_len,
));
let left_hand_open = dotdot_pos == 0;
let right_hand_open = dotdot_pos == lite_arg_len - right_number_offset;
let left = if left_hand_open {
None
} else if let (left, None) = parse_arg(SyntaxShape::Number, scope, &lhs) {
Some(left)
} else {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("range", lhs)),
);
};
let right = if right_hand_open {
None
} else if let (right, None) = parse_arg(SyntaxShape::Number, scope, &rhs) {
Some(right)
} else {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("range", rhs)),
);
};
(
SpannedExpression::new(
Expression::range(
left,
operator.spanned(Span::new(
lite_arg_span_start + dotdot_pos,
lite_arg_span_start + dotdot_pos + right_number_offset,
)),
right,
),
lite_arg.span,
),
None,
)
}
/// Parse any allowed operator, including word-based operators
fn parse_operator(lite_arg: &Spanned<String>) -> (SpannedExpression, Option<ParseError>) {
let operator = match &lite_arg.item[..] {
"==" => Operator::Equal,
"!=" => Operator::NotEqual,
"<" => Operator::LessThan,
"<=" => Operator::LessThanOrEqual,
">" => Operator::GreaterThan,
">=" => Operator::GreaterThanOrEqual,
"=~" => Operator::Contains,
"!~" => Operator::NotContains,
"+" => Operator::Plus,
"-" => Operator::Minus,
"*" => Operator::Multiply,
"/" => Operator::Divide,
"in" => Operator::In,
"not-in" => Operator::NotIn,
"mod" => Operator::Modulo,
"&&" => Operator::And,
"||" => Operator::Or,
"**" => Operator::Pow,
_ => {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch("operator", lite_arg.clone())),
);
}
};
(
SpannedExpression::new(Expression::operator(operator), lite_arg.span),
None,
)
}
/// Parse a duration type, eg '10day'
fn parse_duration(lite_arg: &Spanned<String>) -> (SpannedExpression, Option<ParseError>) {
fn parse_decimal_str_to_number(decimal: &str) -> Option<i64> {
let string_to_parse = format!("0.{}", decimal);
if let Ok(x) = string_to_parse.parse::<f64>() {
return Some((1_f64 / x) as i64);
}
None
}
let unit_groups = [
(Unit::Nanosecond, "NS", None),
(Unit::Microsecond, "US", Some((Unit::Nanosecond, 1000))),
(Unit::Millisecond, "MS", Some((Unit::Microsecond, 1000))),
(Unit::Second, "SEC", Some((Unit::Millisecond, 1000))),
(Unit::Minute, "MIN", Some((Unit::Second, 60))),
(Unit::Hour, "HR", Some((Unit::Minute, 60))),
(Unit::Day, "DAY", Some((Unit::Minute, 1440))),
(Unit::Week, "WK", Some((Unit::Day, 7))),
];
if let Some(unit) = unit_groups
.iter()
.find(|&x| lite_arg.to_uppercase().ends_with(x.1))
{
let mut lhs = lite_arg.item.clone();
for _ in 0..unit.1.len() {
lhs.pop();
}
let input: Vec<&str> = lhs.split('.').collect();
let (value, unit_to_use) = match &input[..] {
[number_str] => (number_str.parse::<i64>().ok(), unit.0),
[number_str, decimal_part_str] => match unit.2 {
Some(unit_to_convert_to) => match (
number_str.parse::<i64>(),
parse_decimal_str_to_number(decimal_part_str),
) {
(Ok(number), Some(decimal_part)) => (
Some(
(number * unit_to_convert_to.1) + (unit_to_convert_to.1 / decimal_part),
),
unit_to_convert_to.0,
),
_ => (None, unit.0),
},
None => (None, unit.0),
},
_ => (None, unit.0),
};
if let Some(x) = value {
let lhs_span = Span::new(lite_arg.span.start(), lite_arg.span.start() + lhs.len());
let unit_span = Span::new(lite_arg.span.start() + lhs.len(), lite_arg.span.end());
return (
SpannedExpression::new(
Expression::unit(x.spanned(lhs_span), unit_to_use.spanned(unit_span)),
lite_arg.span,
),
None,
);
}
}
(
garbage(lite_arg.span),
Some(ParseError::mismatch("duration", lite_arg.clone())),
)
}
/// Parse a unit type, eg '10kb'
fn parse_filesize(lite_arg: &Spanned<String>) -> (SpannedExpression, Option<ParseError>) {
fn parse_decimal_str_to_number(decimal: &str) -> Option<i64> {
let string_to_parse = format!("0.{}", decimal);
if let Ok(x) = string_to_parse.parse::<f64>() {
return Some((1_f64 / x) as i64);
}
None
}
let unit_groups = [
(Unit::Kilobyte, "KB", Some((Unit::Byte, 1000))),
(Unit::Megabyte, "MB", Some((Unit::Kilobyte, 1000))),
(Unit::Gigabyte, "GB", Some((Unit::Megabyte, 1000))),
(Unit::Terabyte, "TB", Some((Unit::Gigabyte, 1000))),
(Unit::Petabyte, "PB", Some((Unit::Terabyte, 1000))),
(Unit::Kibibyte, "KIB", Some((Unit::Byte, 1024))),
(Unit::Mebibyte, "MIB", Some((Unit::Kibibyte, 1024))),
(Unit::Gibibyte, "GIB", Some((Unit::Mebibyte, 1024))),
(Unit::Tebibyte, "TIB", Some((Unit::Gibibyte, 1024))),
(Unit::Pebibyte, "PIB", Some((Unit::Tebibyte, 1024))),
(Unit::Byte, "B", None),
];
if let Some(unit) = unit_groups
.iter()
.find(|&x| lite_arg.to_uppercase().ends_with(x.1))
{
let mut lhs = lite_arg.item.clone();
for _ in 0..unit.1.len() {
lhs.pop();
}
let input: Vec<&str> = lhs.split('.').collect();
let (value, unit_to_use) = match &input[..] {
[number_str] => (number_str.parse::<i64>().ok(), unit.0),
[number_str, decimal_part_str] => match unit.2 {
Some(unit_to_convert_to) => match (
number_str.parse::<i64>(),
parse_decimal_str_to_number(decimal_part_str),
) {
(Ok(number), Some(decimal_part)) => (
Some(
(number * unit_to_convert_to.1) + (unit_to_convert_to.1 / decimal_part),
),
unit_to_convert_to.0,
),
_ => (None, unit.0),
},
None => (None, unit.0),
},
_ => (None, unit.0),
};
if let Some(x) = value {
let lhs_span = Span::new(lite_arg.span.start(), lite_arg.span.start() + lhs.len());
let unit_span = Span::new(lite_arg.span.start() + lhs.len(), lite_arg.span.end());
return (
SpannedExpression::new(
Expression::unit(x.spanned(lhs_span), unit_to_use.spanned(unit_span)),
lite_arg.span,
),
None,
);
}
}
(
garbage(lite_arg.span),
Some(ParseError::mismatch("unit", lite_arg.clone())),
)
}
fn parse_subexpression(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
let mut error = None;
let string: String = lite_arg
.item
.chars()
.skip(1)
.take(lite_arg.item.chars().count() - 2)
.collect();
// We haven't done much with the inner string, so let's go ahead and work with it
let (tokens, err) = lex(&string, lite_arg.span.start() + 1, NewlineMode::Whitespace);
if error.is_none() {
error = err;
};
let (lite_block, err) = parse_block(tokens);
if error.is_none() {
error = err;
};
scope.enter_scope();
let (classified_block, err) = classify_block(&lite_block, scope);
if error.is_none() {
error = err;
};
scope.exit_scope();
(
SpannedExpression::new(Expression::Subexpression(classified_block), lite_arg.span),
error,
)
}
fn parse_variable(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
if lite_arg.item == "$it" {
trace!("parsing $it");
parse_full_column_path(lite_arg, scope)
} else {
(
SpannedExpression::new(
Expression::variable(lite_arg.item.clone(), lite_arg.span),
lite_arg.span,
),
None,
)
}
}
/// Parses the given lite_arg starting with dollar returning
/// a expression starting with $
/// Currently either Variable, String interpolation, FullColumnPath
fn parse_dollar_expr(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
trace!("Parsing dollar expression: {:?}", lite_arg.item);
if (lite_arg.item.starts_with("$\"") && lite_arg.item.len() > 1 && lite_arg.item.ends_with('"'))
|| (lite_arg.item.starts_with("$'")
&& lite_arg.item.len() > 1
&& lite_arg.item.ends_with('\''))
{
// This is an interpolated string
parse_interpolated_string(lite_arg, scope)
} else if let (expr, None) = parse_range(lite_arg, scope) {
(expr, None)
} else if let (expr, None) = parse_full_column_path(lite_arg, scope) {
(expr, None)
} else {
parse_variable(lite_arg, scope)
}
}
#[derive(Debug)]
enum FormatCommand {
Text(Spanned<String>),
Column(Spanned<String>),
}
fn format(input: &str, start: usize) -> (Vec<FormatCommand>, Option<ParseError>) {
let original_start = start;
let mut output = vec![];
let mut error = None;
let mut loop_input = input.chars().peekable();
let mut start = start;
let mut end = start;
loop {
let mut before = String::new();
loop {
end += 1;
if let Some(c) = loop_input.next() {
if c == '(' {
break;
}
before.push(c);
} else {
break;
}
}
if !before.is_empty() {
output.push(FormatCommand::Text(
before.to_string().spanned(Span::new(start, end - 1)),
));
}
// Look for column as we're now at one
let mut column = String::new();
start = end;
let mut found_end = false;
let mut delimiter_stack = vec![')'];
for c in &mut loop_input {
end += 1;
if let Some('\'') = delimiter_stack.last() {
if c == '\'' {
delimiter_stack.pop();
}
} else if let Some('"') = delimiter_stack.last() {
if c == '"' {
delimiter_stack.pop();
}
} else if c == '\'' {
delimiter_stack.push('\'');
} else if c == '"' {
delimiter_stack.push('"');
} else if c == '(' {
delimiter_stack.push(')');
} else if c == ')' {
if let Some(')') = delimiter_stack.last() {
delimiter_stack.pop();
}
if delimiter_stack.is_empty() {
found_end = true;
break;
}
}
column.push(c);
}
if !column.is_empty() {
output.push(FormatCommand::Column(
column.to_string().spanned(Span::new(start, end)),
));
}
if column.is_empty() {
break;
}
if !found_end {
error = Some(ParseError::argument_error(
input.spanned(Span::new(original_start, end)),
ArgumentError::MissingValueForName("unclosed ()".to_string()),
));
}
start = end;
}
(output, error)
}
/// Parses an interpolated string, one that has expressions inside of it
fn parse_interpolated_string(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
trace!("Parse_interpolated_string");
let string_len = lite_arg.item.chars().count();
let inner_string = lite_arg
.item
.chars()
.skip(2)
.take(string_len - 3)
.collect::<String>();
let mut error = None;
let (format_result, err) = format(&inner_string, lite_arg.span.start() + 2);
if error.is_none() {
error = err;
}
let mut output = vec![];
for f in format_result {
match f {
FormatCommand::Text(t) => {
output.push(SpannedExpression {
expr: Expression::Literal(hir::Literal::String(t.item)),
span: t.span,
});
}
FormatCommand::Column(c) => {
let result = parse(&c, c.span.start(), scope);
match result {
(classified_block, None) => {
output.push(SpannedExpression {
expr: Expression::Subexpression(classified_block),
span: c.span,
});
}
(_, Some(err)) => {
return (garbage(c.span), Some(err));
}
}
}
}
}
let pipelines = vec![Pipeline {
span: lite_arg.span,
list: vec![ClassifiedCommand::Internal(InternalCommand {
name: "build-string".to_owned(),
name_span: lite_arg.span,
args: hir::Call {
head: Box::new(SpannedExpression {
expr: Expression::Synthetic(hir::Synthetic::String("build-string".to_owned())),
span: lite_arg.span,
}),
external_redirection: ExternalRedirection::Stdout,
named: None,
positional: Some(output),
span: lite_arg.span,
},
})],
}];
let group = Group::new(pipelines, lite_arg.span);
let call = SpannedExpression {
expr: Expression::Subexpression(Arc::new(Block::new(
Signature::new("<subexpression>"),
vec![group],
IndexMap::new(),
lite_arg.span,
))),
span: lite_arg.span,
};
(call, error)
}
/// Parses the given argument using the shape as a guide for how to correctly parse the argument
fn parse_external_arg(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (SpannedExpression, Option<ParseError>) {
if lite_arg.item.starts_with('$') {
parse_dollar_expr(lite_arg, scope)
} else if lite_arg.item.starts_with('(') {
parse_full_column_path(lite_arg, scope)
} else {
(
SpannedExpression::new(Expression::string(lite_arg.item.clone()), lite_arg.span),
None,
)
}
}
fn parse_list(
lite_block: &LiteBlock,
scope: &dyn ParserScope,
) -> (Vec<SpannedExpression>, Option<ParseError>) {
let mut error = None;
if lite_block.block.is_empty() {
return (vec![], None);
}
let lite_pipeline = &lite_block.block[0];
let mut output = vec![];
for lite_pipeline in &lite_pipeline.pipelines {
for lite_inner in &lite_pipeline.commands {
for part in &lite_inner.parts {
let item = if part.ends_with(',') {
let mut str: String = part.item.clone();
str.pop();
str.spanned(Span::new(part.span.start(), part.span.end() - 1))
} else {
part.clone()
};
let (part, err) = parse_arg(SyntaxShape::Any, scope, &item);
output.push(part);
if error.is_none() {
error = err;
}
}
}
}
(output, error)
}
fn parse_table(
lite_block: &LiteBlock,
scope: &dyn ParserScope,
span: Span,
) -> (SpannedExpression, Option<ParseError>) {
let mut error = None;
let mut output = vec![];
// Header
let lite_group = &lite_block.block[0];
let lite_pipeline = &lite_group.pipelines[0];
let lite_inner = &lite_pipeline.commands[0];
let (string, err) = verify_and_strip(&lite_inner.parts[0], '[', ']');
if error.is_none() {
error = err;
}
let (tokens, err) = lex(
&string,
lite_inner.parts[0].span.start() + 1,
NewlineMode::Whitespace,
);
if err.is_some() {
return (garbage(lite_inner.span()), err);
}
let (lite_header, err) = parse_block(tokens);
if err.is_some() {
return (garbage(lite_inner.span()), err);
}
let (headers, err) = parse_list(&lite_header, scope);
if error.is_none() {
error = err;
}
// Cells
let lite_rows = &lite_group.pipelines[1];
let lite_cells = &lite_rows.commands[0];
for arg in &lite_cells.parts {
let (string, err) = verify_and_strip(arg, '[', ']');
if error.is_none() {
error = err;
}
let (tokens, err) = lex(&string, arg.span.start() + 1, NewlineMode::Whitespace);
if err.is_some() {
return (garbage(arg.span), err);
}
let (lite_cell, err) = parse_block(tokens);
if err.is_some() {
return (garbage(arg.span), err);
}
let (inner_cell, err) = parse_list(&lite_cell, scope);
if error.is_none() {
error = err;
}
output.push(inner_cell);
}
(
SpannedExpression::new(Expression::Table(headers, output), span),
error,
)
}
fn parse_int(lite_arg: &Spanned<String>) -> (SpannedExpression, Option<ParseError>) {
if lite_arg.item.starts_with("0x") {
if let Ok(v) = i64::from_str_radix(&lite_arg.item[2..], 16) {
(
SpannedExpression::new(Expression::integer(v), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("int", lite_arg.clone())),
)
}
} else if lite_arg.item.starts_with("0b") {
if let Ok(v) = i64::from_str_radix(&lite_arg.item[2..], 2) {
(
SpannedExpression::new(Expression::integer(v), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("int", lite_arg.clone())),
)
}
} else if lite_arg.item.starts_with("0o") {
if let Ok(v) = i64::from_str_radix(&lite_arg.item[2..], 8) {
(
SpannedExpression::new(Expression::integer(v), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("int", lite_arg.clone())),
)
}
} else if let Ok(x) = lite_arg.item.parse::<i64>() {
(
SpannedExpression::new(Expression::integer(x), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("int", lite_arg.clone())),
)
}
}
/// Parses the given argument using the shape as a guide for how to correctly parse the argument
fn parse_arg(
expected_type: SyntaxShape,
scope: &dyn ParserScope,
lite_arg: &Spanned<String>,
) -> (SpannedExpression, Option<ParseError>) {
if lite_arg.item.starts_with('$') {
return parse_dollar_expr(lite_arg, scope);
}
// before anything else, try to see if this is a number in paranthesis
if lite_arg.item.starts_with('(') {
return parse_full_column_path(lite_arg, scope);
}
match expected_type {
SyntaxShape::Number => {
if let (x, None) = parse_int(lite_arg) {
(x, None)
} else if let Ok(x) = lite_arg.item.parse::<BigInt>() {
(
SpannedExpression::new(Expression::big_integer(x), lite_arg.span),
None,
)
} else if let Ok(x) = lite_arg.item.parse::<BigDecimal>() {
(
SpannedExpression::new(Expression::decimal(x), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("number", lite_arg.clone())),
)
}
}
SyntaxShape::Int => {
if let Ok(x) = lite_arg.item.parse::<i64>() {
(
SpannedExpression::new(Expression::integer(x), lite_arg.span),
None,
)
} else if let Ok(x) = lite_arg.item.parse::<BigInt>() {
(
SpannedExpression::new(Expression::big_integer(x), lite_arg.span),
None,
)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch("int", lite_arg.clone())),
)
}
}
SyntaxShape::String => {
let trimmed = trim_quotes(&lite_arg.item);
(
SpannedExpression::new(Expression::string(trimmed), lite_arg.span),
None,
)
}
SyntaxShape::GlobPattern => {
let trimmed = Cow::Owned(trim_quotes(&lite_arg.item));
let expanded = expand_path_string(trimmed).to_string();
(
SpannedExpression::new(Expression::glob_pattern(expanded), lite_arg.span),
None,
)
}
SyntaxShape::Range => parse_range(lite_arg, scope),
SyntaxShape::Operator => (
garbage(lite_arg.span),
Some(ParseError::mismatch("operator", lite_arg.clone())),
),
SyntaxShape::Filesize => parse_filesize(lite_arg),
SyntaxShape::Duration => parse_duration(lite_arg),
SyntaxShape::FilePath => {
let trimmed = trim_quotes(&lite_arg.item);
let path = PathBuf::from(trimmed);
let expanded = expand_path(Cow::Owned(path)).to_path_buf();
(
SpannedExpression::new(Expression::FilePath(expanded), lite_arg.span),
None,
)
}
SyntaxShape::ColumnPath => parse_simple_column_path(lite_arg),
SyntaxShape::FullColumnPath => parse_full_column_path(lite_arg, scope),
SyntaxShape::Any => {
let shapes = vec![
SyntaxShape::Int,
SyntaxShape::Number,
SyntaxShape::Range,
SyntaxShape::Filesize,
SyntaxShape::Duration,
SyntaxShape::Block,
SyntaxShape::Table,
SyntaxShape::String,
];
for shape in shapes.iter() {
if let (s, None) = parse_arg(*shape, scope, lite_arg) {
return (s, None);
}
}
(
garbage(lite_arg.span),
Some(ParseError::mismatch("any shape", lite_arg.clone())),
)
}
SyntaxShape::Table => {
let mut chars = lite_arg.item.chars();
match (chars.next(), chars.next_back()) {
(Some('['), Some(']')) => {
// We have a literal row
let string: String = chars.collect();
// We haven't done much with the inner string, so let's go ahead and work with it
let (tokens, err) =
lex(&string, lite_arg.span.start() + 1, NewlineMode::Whitespace);
if err.is_some() {
return (garbage(lite_arg.span), err);
}
let (lite_block, err) = parse_block(tokens);
if err.is_some() {
return (garbage(lite_arg.span), err);
}
let lite_groups = &lite_block.block;
if lite_groups.is_empty() {
return (
SpannedExpression::new(Expression::List(vec![]), lite_arg.span),
None,
);
}
if lite_groups[0].pipelines.len() == 1 {
let (items, err) = parse_list(&lite_block, scope);
(
SpannedExpression::new(Expression::List(items), lite_arg.span),
err,
)
} else if lite_groups[0].pipelines.len() == 2 {
parse_table(&lite_block, scope, lite_arg.span)
} else {
(
garbage(lite_arg.span),
Some(ParseError::mismatch(
"list or table",
"unknown".to_string().spanned(lite_arg.span),
)),
)
}
}
_ => (
garbage(lite_arg.span),
Some(ParseError::mismatch("table", lite_arg.clone())),
),
}
}
SyntaxShape::MathExpression => parse_arg(SyntaxShape::Any, scope, lite_arg),
SyntaxShape::Block | SyntaxShape::RowCondition => {
// Blocks have one of two forms: the literal block and the implied block
// To parse a literal block, we need to detect that what we have is itself a block
let mut chars: Vec<_> = lite_arg.item.chars().collect();
match chars.first() {
Some('{') => {
let mut error = None;
if let Some('}') = chars.last() {
chars = chars[1..(chars.len() - 1)].to_vec();
} else {
chars = chars[1..].to_vec();
error = Some(ParseError::unclosed(
"}".into(),
Span::new(lite_arg.span.end(), lite_arg.span.end()),
));
}
// We have a literal block
let string: String = chars.into_iter().collect();
// We haven't done much with the inner string, so let's go ahead and work with it
let (mut tokens, err) =
lex(&string, lite_arg.span.start() + 1, NewlineMode::Normal);
if error.is_none() {
error = err;
}
// Check to see if we have parameters
let params = if matches!(
tokens.first(),
Some(Token {
contents: TokenContents::Pipe,
..
})
) {
// We've found a parameter list
let mut param_tokens = vec![];
let mut token_iter = tokens.into_iter().skip(1);
for token in &mut token_iter {
if matches!(
token,
Token {
contents: TokenContents::Pipe,
..
}
) {
break;
} else {
param_tokens.push(token);
}
}
let split_tokens =
lex_split_baseline_tokens_on(param_tokens, &[',', ':', '?']);
let mut i = 0;
let mut params = vec![];
while i < split_tokens.len() {
let (parameter, advance_by, error) =
parse_parameter(&split_tokens[i..], split_tokens[i].span);
if error.is_some() {
return (garbage(lite_arg.span), error);
}
i += advance_by;
params.push(parameter);
}
tokens = token_iter.collect();
if tokens.is_empty() {
return (
garbage(lite_arg.span),
Some(ParseError::mismatch(
"block with parameters",
lite_arg.clone(),
)),
);
}
params
} else {
vec![]
};
let (lite_block, err) = parse_block(tokens);
if error.is_none() {
error = err;
}
scope.enter_scope();
let (mut classified_block, err) = classify_block(&lite_block, scope);
if error.is_none() {
error = err;
}
scope.exit_scope();
if let Some(classified_block) = Arc::get_mut(&mut classified_block) {
classified_block.span = lite_arg.span;
if !params.is_empty() {
classified_block.params.positional.clear();
for param in params {
classified_block
.params
.positional
.push((param.pos_type, param.desc.unwrap_or_default()));
}
}
}
(
SpannedExpression::new(Expression::Block(classified_block), lite_arg.span),
error,
)
}
_ => {
// We have an implied block, but we can't parse this here
// it needed to have been parsed up higher where we have control over more than one arg
(
garbage(lite_arg.span),
Some(ParseError::mismatch("block", lite_arg.clone())),
)
}
}
}
}
}
/// This is a bit of a "fix-up" of previously parsed areas. In cases where we're in shorthand mode (eg in the `where` command), we need
/// to use the original source to parse a column path. Without it, we'll lose a little too much information to parse it correctly. As we'll
/// only know we were on the left-hand side of an expression after we do the full math parse, we need to do this step after rather than during
/// the initial parse.
fn shorthand_reparse(
left: SpannedExpression,
orig_left: Option<Spanned<String>>,
scope: &dyn ParserScope,
shorthand_mode: bool,
) -> (SpannedExpression, Option<ParseError>) {
// If we're in shorthand mode, we need to reparse the left-hand side if possible
if shorthand_mode {
if let Some(orig_left) = orig_left {
parse_arg(SyntaxShape::FullColumnPath, scope, &orig_left)
} else {
(left, None)
}
} else {
(left, None)
}
}
fn parse_possibly_parenthesized(
lite_arg: &Spanned<String>,
scope: &dyn ParserScope,
) -> (
(Option<Spanned<String>>, SpannedExpression),
Option<ParseError>,
) {
let (lhs, err) = parse_arg(SyntaxShape::Any, scope, lite_arg);
((Some(lite_arg.clone()), lhs), err)
}
/// Handle parsing math expressions, complete with working with the precedence of the operators
pub fn parse_math_expression(
incoming_idx: usize,
lite_args: &[Spanned<String>],
scope: &dyn ParserScope,
shorthand_mode: bool,
) -> (usize, SpannedExpression, Option<ParseError>) {
// Precedence parsing is included
// shorthand_mode means that the left-hand side of an expression can point to a column-path.
// To make this possible, we parse as normal, but then go back and when we detect a
// left-hand side, reparse that value if it's a string
let mut idx = 0;
let mut error = None;
let mut working_exprs = vec![];
let mut prec = vec![];
let (lhs_working_expr, err) = parse_possibly_parenthesized(&lite_args[idx], scope);
if error.is_none() {
error = err;
}
working_exprs.push(lhs_working_expr);
idx += 1;
prec.push(0);
while idx < lite_args.len() {
let (op, err) = parse_operator(&lite_args[idx]);
if error.is_none() {
error = err;
}
idx += 1;
if idx == lite_args.len() {
if error.is_none() {
error = Some(ParseError::argument_error(
lite_args[idx - 1].clone(),
ArgumentError::MissingMandatoryPositional("right hand side".into()),
));
}
working_exprs.push((None, garbage(op.span)));
working_exprs.push((None, garbage(op.span)));
prec.push(0);
break;
}
trace!(
"idx: {} working_exprs: {:#?} prec: {:?}",
idx,
working_exprs,
prec
);
let (rhs_working_expr, err) = parse_possibly_parenthesized(&lite_args[idx], scope);
if error.is_none() {
error = err;
}
let next_prec = op.precedence();
if !prec.is_empty() && next_prec > *prec.last().expect("this shouldn't happen") {
prec.push(next_prec);
working_exprs.push((None, op));
working_exprs.push(rhs_working_expr);
idx += 1;
continue;
}
while !prec.is_empty()
&& *prec.last().expect("This shouldn't happen") >= next_prec
&& next_prec > 0 // Not garbage
&& working_exprs.len() >= 3
{
// Pop 3 and create and expression, push and repeat
trace!(
"idx: {} working_exprs: {:#?} prec: {:?}",
idx,
working_exprs,
prec
);
let (_, right) = working_exprs.pop().expect("This shouldn't be possible");
let (_, op) = working_exprs.pop().expect("This shouldn't be possible");
let (orig_left, left) = working_exprs.pop().expect("This shouldn't be possible");
// If we're in shorthand mode, we need to reparse the left-hand side if possible
let (left, err) = shorthand_reparse(left, orig_left, scope, shorthand_mode);
if error.is_none() {
error = err;
}
let span = Span::new(left.span.start(), right.span.end());
working_exprs.push((
None,
SpannedExpression {
expr: Expression::Binary(Box::new(Binary { left, op, right })),
span,
},
));
prec.pop();
}
working_exprs.push((None, op));
working_exprs.push(rhs_working_expr);
prec.push(next_prec);
idx += 1;
}
while working_exprs.len() >= 3 {
// Pop 3 and create and expression, push and repeat
let (_, right) = working_exprs.pop().expect("This shouldn't be possible");
let (_, op) = working_exprs.pop().expect("This shouldn't be possible");
let (orig_left, left) = working_exprs.pop().expect("This shouldn't be possible");
let (left, err) = shorthand_reparse(left, orig_left, scope, shorthand_mode);
if error.is_none() {
error = err;
}
let span = Span::new(left.span.start(), right.span.end());
working_exprs.push((
None,
SpannedExpression {
expr: Expression::Binary(Box::new(Binary { left, op, right })),
span,
},
));
}
let (orig_left, left) = working_exprs.pop().expect("This shouldn't be possible");
let (left, err) = shorthand_reparse(left, orig_left, scope, shorthand_mode);
if error.is_none() {
error = err;
}
(incoming_idx + idx, left, error)
}
/// Handles parsing the positional arguments as a batch
/// This allows us to check for times where multiple arguments are treated as one shape, as is the case with SyntaxShape::Math
fn parse_positional_argument(
idx: usize,
lite_cmd: &LiteCommand,
positional_type: &PositionalType,
remaining_positionals: usize,
scope: &dyn ParserScope,
) -> (usize, SpannedExpression, Option<ParseError>) {
let mut idx = idx;
let mut error = None;
let arg = match positional_type {
PositionalType::Mandatory(_, SyntaxShape::MathExpression)
| PositionalType::Optional(_, SyntaxShape::MathExpression) => {
let end_idx = if (lite_cmd.parts.len() - 1) > remaining_positionals {
lite_cmd.parts.len() - remaining_positionals
} else {
lite_cmd.parts.len()
};
let (new_idx, arg, err) =
parse_math_expression(idx, &lite_cmd.parts[idx..end_idx], scope, false);
let span = arg.span;
let mut commands = hir::Pipeline::new(span);
commands.push(ClassifiedCommand::Expr(Box::new(arg)));
let block = hir::Block::new(
Signature::new("<initializer>"),
vec![Group::new(vec![commands], lite_cmd.span())],
IndexMap::new(),
span,
);
let arg = SpannedExpression::new(Expression::Block(Arc::new(block)), span);
idx = new_idx - 1;
if error.is_none() {
error = err;
}
arg
}
PositionalType::Mandatory(_, SyntaxShape::RowCondition)
| PositionalType::Optional(_, SyntaxShape::RowCondition) => {
// A condition can take up multiple arguments, as we build the operation as <arg> <operator> <arg>
// We need to do this here because in parse_arg, we have access to only one arg at a time
if idx < lite_cmd.parts.len() {
if lite_cmd.parts[idx].item.starts_with('{') {
// It's an explicit math expression, so parse it deeper in
let (arg, err) =
parse_arg(SyntaxShape::RowCondition, scope, &lite_cmd.parts[idx]);
if error.is_none() {
error = err;
}
arg
} else {
let end_idx = if (lite_cmd.parts.len() - 1) > remaining_positionals {
lite_cmd.parts.len() - remaining_positionals
} else {
lite_cmd.parts.len()
};
let (new_idx, arg, err) =
parse_math_expression(idx, &lite_cmd.parts[idx..end_idx], scope, true);
let span = arg.span;
let mut commands = hir::Pipeline::new(span);
commands.push(ClassifiedCommand::Expr(Box::new(arg)));
let mut block = hir::Block::new(
Signature::new("<cond>"),
vec![Group::new(vec![commands], lite_cmd.span())],
IndexMap::new(),
span,
);
block.infer_params();
let arg = SpannedExpression::new(Expression::Block(Arc::new(block)), span);
idx = new_idx - 1;
if error.is_none() {
error = err;
}
arg
}
} else {
if error.is_none() {
error = Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::MissingMandatoryPositional("condition".into()),
))
}
garbage(lite_cmd.span())
}
}
PositionalType::Mandatory(_, shape) | PositionalType::Optional(_, shape) => {
let (arg, err) = parse_arg(*shape, scope, &lite_cmd.parts[idx]);
if error.is_none() {
error = err;
}
arg
}
};
(idx, arg, error)
}
/// Does a full parse of an internal command using the lite-ly parse command as a starting point
/// This main focus at this level is to understand what flags were passed in, what positional arguments were passed in, what rest arguments were passed in
/// and to ensure that the basic requirements in terms of number of each were met.
fn parse_internal_command(
lite_cmd: &LiteCommand,
scope: &dyn ParserScope,
signature: &Signature,
mut idx: usize,
) -> (InternalCommand, Option<ParseError>) {
// This is a known internal command, so we need to work with the arguments and parse them according to the expected types
let (name, name_span) = (
lite_cmd.parts[0..(idx + 1)]
.iter()
.map(|x| x.item.clone())
.collect::<Vec<String>>()
.join(" "),
Span::new(
lite_cmd.parts[0].span.start(),
lite_cmd.parts[idx].span.end(),
),
);
let mut internal_command = InternalCommand::new(name, name_span, lite_cmd.span());
internal_command.args.set_initial_flags(signature);
let mut current_positional = 0;
let mut named = NamedArguments::new();
let mut positional = vec![];
let mut error = None;
idx += 1; // Start where the arguments begin
while idx < lite_cmd.parts.len() {
if lite_cmd.parts[idx].item.starts_with('-') && lite_cmd.parts[idx].item.len() > 1 {
let (named_types, err) = super::flag::get_flag_signature_spec(
signature,
&internal_command,
&lite_cmd.parts[idx],
);
if err.is_none() {
for (full_name, named_type) in &named_types {
match named_type {
NamedType::Mandatory(_, shape) | NamedType::Optional(_, shape) => {
if lite_cmd.parts[idx].item.contains('=') {
let mut offset = 0;
let value = lite_cmd.parts[idx]
.item
.chars()
.skip_while(|prop| {
offset += 1;
*prop != '='
})
.nth(1);
offset = if value.is_none() { offset - 1 } else { offset };
let flag_value = Span::new(
lite_cmd.parts[idx].span.start() + offset,
lite_cmd.parts[idx].span.end(),
);
let value = lite_cmd.parts[idx].item[offset..]
.to_string()
.spanned(flag_value);
let (arg, err) = parse_arg(*shape, scope, &value);
named.insert_mandatory(
full_name.clone(),
lite_cmd.parts[idx].span,
arg,
);
if error.is_none() {
error = err;
}
} else if idx == lite_cmd.parts.len() {
// Oops, we're missing the argument to our named argument
if error.is_none() {
error = Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::MissingValueForName(format!("{:?}", shape)),
));
}
} else {
idx += 1;
if lite_cmd.parts.len() > idx {
let (arg, err) = parse_arg(*shape, scope, &lite_cmd.parts[idx]);
named.insert_mandatory(
full_name.clone(),
lite_cmd.parts[idx - 1].span,
arg,
);
if error.is_none() {
error = err;
}
} else if error.is_none() {
error = Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::MissingValueForName(full_name.to_owned()),
));
}
}
}
NamedType::Switch(_) => {
named.insert_switch(
full_name.clone(),
Some(Flag::new(FlagKind::Longhand, lite_cmd.parts[idx].span)),
);
}
}
}
} else {
positional.push(garbage(lite_cmd.parts[idx].span));
if error.is_none() {
error = err;
}
}
} else if signature.positional.len() > current_positional {
let arg = {
let (new_idx, expr, err) = parse_positional_argument(
idx,
lite_cmd,
&signature.positional[current_positional].0,
signature.positional.len() - current_positional - 1,
scope,
);
idx = new_idx;
if error.is_none() {
error = err;
}
expr
};
positional.push(arg);
current_positional += 1;
} else if let Some((rest_type, _)) = &signature.rest_positional {
let (arg, err) = parse_arg(*rest_type, scope, &lite_cmd.parts[idx]);
if error.is_none() {
error = err;
}
positional.push(arg);
current_positional += 1;
} else {
positional.push(garbage(lite_cmd.parts[idx].span));
if error.is_none() {
error = Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::UnexpectedArgument(lite_cmd.parts[idx].clone()),
));
}
}
idx += 1;
}
// Count the required positional arguments and ensure these have been met
let mut required_arg_count = 0;
for positional_arg in &signature.positional {
if let PositionalType::Mandatory(_, _) = positional_arg.0 {
required_arg_count += 1;
}
}
if positional.len() < required_arg_count && error.is_none() {
// to make "command -h" work even if required arguments are missing
if !named.named.contains_key("help") {
let (_, name) = &signature.positional[positional.len()];
error = Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::MissingMandatoryPositional(name.to_owned()),
));
}
}
if !named.is_empty() {
internal_command.args.named = Some(named);
}
if !positional.is_empty() {
internal_command.args.positional = Some(positional);
}
(internal_command, error)
}
fn parse_external_call(
lite_cmd: &LiteCommand,
end_of_pipeline: bool,
scope: &dyn ParserScope,
) -> (Option<ClassifiedCommand>, Option<ParseError>) {
let mut error = None;
let name = lite_cmd.parts[0].clone().map(|v| {
let trimmed = Cow::Owned(trim_quotes(&v));
expand_path_string(trimmed).to_string()
});
let mut args = vec![];
let (name, err) = parse_arg(SyntaxShape::String, scope, &name);
let name_span = name.span;
if error.is_none() {
error = err;
}
args.push(name);
for lite_arg in &lite_cmd.parts[1..] {
let (expr, err) = parse_external_arg(lite_arg, scope);
if error.is_none() {
error = err;
}
args.push(expr);
}
(
Some(ClassifiedCommand::Internal(InternalCommand {
name: "run_external".to_string(),
name_span,
args: hir::Call {
head: Box::new(SpannedExpression {
expr: Expression::string("run_external".to_string()),
span: name_span,
}),
positional: Some(args),
named: None,
span: name_span,
external_redirection: if end_of_pipeline {
ExternalRedirection::None
} else {
ExternalRedirection::Stdout
},
},
})),
error,
)
}
fn parse_value_call(
call: LiteCommand,
scope: &dyn ParserScope,
) -> (Option<ClassifiedCommand>, Option<ParseError>) {
let mut err = None;
let (head, error) = parse_arg(SyntaxShape::Block, scope, &call.parts[0]);
let mut span = head.span;
if err.is_none() {
err = error;
}
let mut args = vec![];
for arg in call.parts.iter().skip(1) {
let (arg, error) = parse_arg(SyntaxShape::Any, scope, arg);
if err.is_none() {
err = error;
}
span = span.until(arg.span);
args.push(arg);
}
(
Some(ClassifiedCommand::Dynamic(hir::Call {
head: Box::new(head),
positional: Some(args),
named: None,
span,
external_redirection: ExternalRedirection::None,
})),
err,
)
}
fn expand_aliases_in_call(call: &mut LiteCommand, scope: &dyn ParserScope) {
if let Some(name) = call.parts.get(0) {
if let Some(mut expansion) = scope.get_alias(name) {
// set the expansion's spans to point to the alias itself
for item in expansion.iter_mut() {
item.span = name.span;
}
// replace the alias with the expansion
call.parts.remove(0);
expansion.append(&mut call.parts);
call.parts = expansion;
}
}
}
fn parse_call(
mut lite_cmd: LiteCommand,
end_of_pipeline: bool,
scope: &dyn ParserScope,
) -> (Option<ClassifiedCommand>, Option<ParseError>) {
expand_aliases_in_call(&mut lite_cmd, scope);
let mut error = None;
if lite_cmd.parts.is_empty() {
return (None, None);
} else if lite_cmd.parts[0].item.starts_with('^') {
let mut name = lite_cmd.parts[0]
.clone()
.map(|v| v.chars().skip(1).collect::<String>());
name.span = Span::new(name.span.start() + 1, name.span.end());
// TODO this is the same as the `else` branch below, only the name differs. Find a way
// to share this functionality.
let mut args = vec![];
let (name, err) = parse_arg(SyntaxShape::String, scope, &name);
let name_span = name.span;
if error.is_none() {
error = err;
}
args.push(name);
for lite_arg in &lite_cmd.parts[1..] {
let (expr, err) = parse_external_arg(lite_arg, scope);
if error.is_none() {
error = err;
}
args.push(expr);
}
return (
Some(ClassifiedCommand::Internal(InternalCommand {
name: "run_external".to_string(),
name_span,
args: hir::Call {
head: Box::new(SpannedExpression {
expr: Expression::string("run_external".to_string()),
span: name_span,
}),
positional: Some(args),
named: None,
span: name_span,
external_redirection: if end_of_pipeline {
ExternalRedirection::None
} else {
ExternalRedirection::Stdout
},
},
})),
error,
);
} else if lite_cmd.parts[0].item.starts_with('{') {
return parse_value_call(lite_cmd, scope);
} else if lite_cmd.parts[0].item.starts_with('$')
|| lite_cmd.parts[0].item.starts_with('\"')
|| lite_cmd.parts[0].item.starts_with('\'')
|| (lite_cmd.parts[0].item.starts_with('-')
&& parse_arg(SyntaxShape::Number, scope, &lite_cmd.parts[0])
.1
.is_none())
|| (lite_cmd.parts[0].item.starts_with('-')
&& parse_arg(SyntaxShape::Range, scope, &lite_cmd.parts[0])
.1
.is_none())
|| lite_cmd.parts[0].item.starts_with('0')
|| lite_cmd.parts[0].item.starts_with('1')
|| lite_cmd.parts[0].item.starts_with('2')
|| lite_cmd.parts[0].item.starts_with('3')
|| lite_cmd.parts[0].item.starts_with('4')
|| lite_cmd.parts[0].item.starts_with('5')
|| lite_cmd.parts[0].item.starts_with('6')
|| lite_cmd.parts[0].item.starts_with('7')
|| lite_cmd.parts[0].item.starts_with('8')
|| lite_cmd.parts[0].item.starts_with('9')
|| lite_cmd.parts[0].item.starts_with('[')
|| lite_cmd.parts[0].item.starts_with('(')
{
let (_, expr, err) = parse_math_expression(0, &lite_cmd.parts[..], scope, false);
error = error.or(err);
return (Some(ClassifiedCommand::Expr(Box::new(expr))), error);
} else if lite_cmd.parts.len() > 1 {
// FIXME: only build up valid subcommands instead of all arguments
// by checking each part to see if it's a valid identifier name
let mut parts: Vec<_> = lite_cmd.parts.clone().into_iter().map(|x| x.item).collect();
while parts.len() > 1 {
// Check if it's a sub-command
if let Some(signature) = scope.get_signature(&parts.join(" ")) {
let (mut internal_command, err) =
parse_internal_command(&lite_cmd, scope, &signature, parts.len() - 1);
error = error.or(err);
internal_command.args.external_redirection = if end_of_pipeline {
ExternalRedirection::None
} else {
ExternalRedirection::Stdout
};
return (Some(ClassifiedCommand::Internal(internal_command)), error);
}
parts.pop();
}
}
// Check if it's an internal command
if let Some(signature) = scope.get_signature(&lite_cmd.parts[0].item) {
if lite_cmd.parts[0].item == "def" {
let err = parse_definition(&lite_cmd, scope);
error = error.or(err);
}
let (mut internal_command, err) = parse_internal_command(&lite_cmd, scope, &signature, 0);
if internal_command.name == "source" {
if lite_cmd.parts.len() != 2 {
return (
Some(ClassifiedCommand::Internal(internal_command)),
Some(ParseError::argument_error(
lite_cmd.parts[0].clone(),
ArgumentError::MissingMandatoryPositional("a path for sourcing".into()),
)),
);
}
if lite_cmd.parts[1].item.starts_with('$') {
return (
Some(ClassifiedCommand::Internal(internal_command)),
Some(ParseError::mismatch(
"a filepath constant",
lite_cmd.parts[1].clone(),
)),
);
}
let script_path = if let Some(ref positional_args) = internal_command.args.positional {
if let Expression::FilePath(ref p) = positional_args[0].expr {
p
} else {
Path::new(&lite_cmd.parts[1].item)
}
} else {
Path::new(&lite_cmd.parts[1].item)
};
if let Ok(contents) =
std::fs::read_to_string(&expand_path(Cow::Borrowed(Path::new(script_path))))
{
let _ = parse(&contents, 0, scope);
} else {
return (
Some(ClassifiedCommand::Internal(internal_command)),
Some(ParseError::argument_error(
lite_cmd.parts[1].clone(),
ArgumentError::BadValue("can't load source file".into()),
)),
);
}
} else if lite_cmd.parts[0].item == "alias" || lite_cmd.parts[0].item == "unalias" {
let error = parse_alias(&lite_cmd, scope);
if error.is_none() {
return (Some(ClassifiedCommand::Internal(internal_command)), None);
} else {
return (Some(ClassifiedCommand::Internal(internal_command)), error);
}
}
error = error.or(err);
internal_command.args.external_redirection = if end_of_pipeline {
ExternalRedirection::None
} else {
ExternalRedirection::Stdout
};
(Some(ClassifiedCommand::Internal(internal_command)), error)
} else {
parse_external_call(&lite_cmd, end_of_pipeline, scope)
}
}
/// Convert a lite-ly parsed pipeline into a fully classified pipeline, ready to be evaluated.
/// This conversion does error-recovery, so the result is allowed to be lossy. A lossy unit is designated as garbage.
/// Errors are returned as part of a side-car error rather than a Result to allow both error and lossy result simultaneously.
fn parse_pipeline(
lite_pipeline: LitePipeline,
scope: &dyn ParserScope,
) -> (Pipeline, Option<ParseError>) {
let mut commands = Pipeline::new(lite_pipeline.span());
let mut error = None;
let pipeline_len = lite_pipeline.commands.len();
let iter = lite_pipeline.commands.into_iter().peekable();
for lite_cmd in iter.enumerate() {
let (call, err) = parse_call(lite_cmd.1, lite_cmd.0 == (pipeline_len - 1), scope);
if error.is_none() {
error = err;
}
if let Some(call) = call {
if call.has_var_usage("$in") && lite_cmd.0 > 0 {
let call = wrap_with_collect(call, "$in");
commands.push(call);
} else {
commands.push(call);
}
}
}
(commands, error)
}
type SpannedKeyValue = (Spanned<String>, Spanned<String>);
fn wrap_with_collect(call: ClassifiedCommand, var_name: &str) -> ClassifiedCommand {
let mut block = Block::basic();
block.block.push(Group {
pipelines: vec![Pipeline {
list: vec![call],
span: Span::unknown(),
}],
span: Span::unknown(),
});
block.params.positional = vec![(
PositionalType::Mandatory(var_name.into(), SyntaxShape::Any),
format!("implied {}", var_name),
)];
ClassifiedCommand::Internal(InternalCommand {
name: "collect".into(),
name_span: Span::unknown(),
args: Call {
head: Box::new(SpannedExpression {
expr: Expression::Synthetic(Synthetic::String("collect".into())),
span: Span::unknown(),
}),
positional: Some(vec![SpannedExpression {
expr: Expression::Block(Arc::new(block)),
span: Span::unknown(),
}]),
named: None,
span: Span::unknown(),
external_redirection: ExternalRedirection::Stdout,
},
})
}
fn expand_shorthand_forms(
lite_pipeline: &LitePipeline,
) -> (LitePipeline, Option<SpannedKeyValue>, Option<ParseError>) {
if !lite_pipeline.commands.is_empty() {
if lite_pipeline.commands[0].parts[0].contains('=')
&& !lite_pipeline.commands[0].parts[0].starts_with('$')
{
let assignment: Vec<_> = lite_pipeline.commands[0].parts[0].splitn(2, '=').collect();
if assignment.len() != 2 {
(
lite_pipeline.clone(),
None,
Some(ParseError::mismatch(
"environment variable assignment",
lite_pipeline.commands[0].parts[0].clone(),
)),
)
} else {
let original_span = lite_pipeline.commands[0].parts[0].span;
let env_value = trim_quotes(assignment[1]);
let (variable_name, value) = (assignment[0], env_value);
let mut lite_pipeline = lite_pipeline.clone();
if !lite_pipeline.commands[0].parts.len() > 1 {
let mut new_lite_command_parts = lite_pipeline.commands[0].parts.clone();
new_lite_command_parts.remove(0);
lite_pipeline.commands[0].parts = new_lite_command_parts;
(
lite_pipeline,
Some((
variable_name.to_string().spanned(original_span),
value.spanned(original_span),
)),
None,
)
} else {
(
lite_pipeline.clone(),
None,
Some(ParseError::mismatch(
"a command following variable",
lite_pipeline.commands[0].parts[0].clone(),
)),
)
}
}
} else {
(lite_pipeline.clone(), None, None)
}
} else {
(lite_pipeline.clone(), None, None)
}
}
fn parse_alias(call: &LiteCommand, scope: &dyn ParserScope) -> Option<ParseError> {
if call.parts[0].item == "alias" {
if (call.parts.len() == 1)
|| (call.parts.len() == 2
&& (call.parts[1].item == "--help" || (call.parts[1].item == "-h")))
{
return None;
}
if call.parts.len() < 4 {
return Some(ParseError::mismatch("alias", call.parts[0].clone()));
}
if call.parts[0].item != "alias" {
return Some(ParseError::mismatch("alias", call.parts[0].clone()));
}
if call.parts[2].item != "=" {
return Some(ParseError::mismatch("=", call.parts[2].clone()));
}
} else {
// unalias
if call.parts.len() != 2 {
return Some(ParseError::mismatch("unalias", call.parts[0].clone()));
}
}
let name = call.parts[1].item.clone();
let args: Vec<_> = call.parts.iter().skip(3).cloned().collect();
match call.parts[0].item.as_str() {
"alias" => scope.add_alias(&name, args),
"unalias" => {
scope.remove_alias(&name);
}
_ => unreachable!(),
};
None
}
pub fn classify_block(
lite_block: &LiteBlock,
scope: &dyn ParserScope,
) -> (Arc<Block>, Option<ParseError>) {
let mut output = Block::basic();
let mut error = None;
// Check for custom commands first
for group in lite_block.block.iter() {
for pipeline in &group.pipelines {
for call in &pipeline.commands {
if let Some(first) = call.parts.first() {
if first.item == "def" {
if pipeline.commands.len() > 1 && error.is_none() {
error = Some(ParseError::mismatch("definition", first.clone()));
}
parse_definition_prototype(call, scope);
}
}
}
}
}
// Then the rest of the code
for group in &lite_block.block {
let mut out_group = Group::basic();
for pipeline in &group.pipelines {
let mut env_vars = vec![];
let mut pipeline = pipeline.clone();
loop {
if pipeline.commands.is_empty() || pipeline.commands[0].parts.is_empty() {
break;
}
let (pl, vars, err) = expand_shorthand_forms(&pipeline);
if error.is_none() {
error = err;
}
pipeline = pl;
if let Some(vars) = vars {
env_vars.push(vars);
} else {
break;
}
}
let pipeline_span = pipeline.span();
let (mut out_pipe, err) = parse_pipeline(pipeline, scope);
if error.is_none() {
error = err;
}
while let Some(vars) = env_vars.pop() {
let span = pipeline_span;
let block = hir::Block::new(
Signature::new("<block>"),
vec![Group::new(vec![out_pipe.clone()], span)],
IndexMap::new(),
span,
);
let mut call = hir::Call::new(
Box::new(SpannedExpression {
expr: Expression::string("with-env".to_string()),
span,
}),
span,
);
call.positional = Some(vec![
SpannedExpression {
expr: Expression::List(vec![
SpannedExpression {
expr: Expression::string(vars.0.item),
span: vars.0.span,
},
SpannedExpression {
expr: Expression::string(vars.1.item),
span: vars.1.span,
},
]),
span: Span::new(vars.0.span.start(), vars.1.span.end()),
},
SpannedExpression {
expr: Expression::Block(Arc::new(block)),
span,
},
]);
let classified_with_env = ClassifiedCommand::Internal(InternalCommand {
name: "with-env".to_string(),
name_span: Span::unknown(),
args: call,
});
out_pipe = Pipeline {
list: vec![classified_with_env],
span,
};
}
if !out_pipe.list.is_empty() {
out_group.push(out_pipe);
}
}
if !out_group.pipelines.is_empty() {
output.push(out_group);
}
}
let definitions = scope.get_definitions();
for definition in definitions.into_iter() {
let name = definition.params.name.clone();
if !output.definitions.contains_key(&name) {
output.definitions.insert(name, definition.clone());
}
}
output.infer_params();
(Arc::new(output), error)
}
pub fn parse(
input: &str,
span_offset: usize,
scope: &dyn ParserScope,
) -> (Arc<Block>, Option<ParseError>) {
let mut error = None;
let (output, err) = lex(input, span_offset, NewlineMode::Normal);
if error.is_none() {
error = err;
}
let (lite_block, err) = parse_block(output);
if error.is_none() {
error = err;
}
let (block, err) = classify_block(&lite_block, scope);
if error.is_none() {
error = err;
}
(block, error)
}
#[test]
fn unit_parse_byte_units() {
struct TestCase {
string: String,
value: i64,
unit: Unit,
}
let cases = [
TestCase {
string: String::from("108b"),
value: 108,
unit: Unit::Byte,
},
TestCase {
string: String::from("0B"),
value: 0,
unit: Unit::Byte,
},
TestCase {
string: String::from("10kb"),
value: 10,
unit: Unit::Kilobyte,
},
TestCase {
string: String::from("16KB"),
value: 16,
unit: Unit::Kilobyte,
},
TestCase {
string: String::from("99kB"),
value: 99,
unit: Unit::Kilobyte,
},
TestCase {
string: String::from("27Kb"),
value: 27,
unit: Unit::Kilobyte,
},
TestCase {
string: String::from("11Mb"),
value: 11,
unit: Unit::Megabyte,
},
TestCase {
string: String::from("27mB"),
value: 27,
unit: Unit::Megabyte,
},
TestCase {
string: String::from("811Gb"),
value: 811,
unit: Unit::Gigabyte,
},
TestCase {
string: String::from("27gB"),
value: 27,
unit: Unit::Gigabyte,
},
TestCase {
string: String::from("11Tb"),
value: 11,
unit: Unit::Terabyte,
},
TestCase {
string: String::from("1027tB"),
value: 1027,
unit: Unit::Terabyte,
},
TestCase {
string: String::from("11Pb"),
value: 11,
unit: Unit::Petabyte,
},
TestCase {
string: String::from("27pB"),
value: 27,
unit: Unit::Petabyte,
},
TestCase {
string: String::from("10kib"),
value: 10,
unit: Unit::Kibibyte,
},
TestCase {
string: String::from("123KiB"),
value: 123,
unit: Unit::Kibibyte,
},
TestCase {
string: String::from("24kiB"),
value: 24,
unit: Unit::Kibibyte,
},
TestCase {
string: String::from("10mib"),
value: 10,
unit: Unit::Mebibyte,
},
TestCase {
string: String::from("123MiB"),
value: 123,
unit: Unit::Mebibyte,
},
TestCase {
string: String::from("10gib"),
value: 10,
unit: Unit::Gibibyte,
},
TestCase {
string: String::from("123GiB"),
value: 123,
unit: Unit::Gibibyte,
},
TestCase {
string: String::from("10tib"),
value: 10,
unit: Unit::Tebibyte,
},
TestCase {
string: String::from("123TiB"),
value: 123,
unit: Unit::Tebibyte,
},
TestCase {
string: String::from("10pib"),
value: 10,
unit: Unit::Pebibyte,
},
TestCase {
string: String::from("123PiB"),
value: 123,
unit: Unit::Pebibyte,
},
];
for case in cases.iter() {
let input_len = case.string.len();
let value_len = case.value.to_string().len();
let input = case.string.clone().spanned(Span::new(0, input_len));
let result = parse_filesize(&input);
assert_eq!(result.1, None);
assert_eq!(
result.0.expr,
Expression::unit(
Spanned {
span: Span::new(0, value_len),
item: case.value
},
Spanned {
span: Span::new(value_len, input_len),
item: case.unit
}
)
);
}
}
#[test]
fn unit_parse_byte_units_decimal() {
struct TestCase {
string: String,
value: i64,
value_str: String,
unit: Unit,
}
let cases = [
TestCase {
string: String::from("0.25KB"),
value: 250,
value_str: String::from("0.25"),
unit: Unit::Byte,
},
TestCase {
string: String::from("2.5Mb"),
value: 2500,
value_str: String::from("2.5"),
unit: Unit::Kilobyte,
},
TestCase {
string: String::from("0.5Gb"),
value: 500,
value_str: String::from("0.5"),
unit: Unit::Megabyte,
},
TestCase {
string: String::from("811.5Gb"),
value: 811500,
value_str: String::from("811.5"),
unit: Unit::Megabyte,
},
TestCase {
string: String::from("11.5Tb"),
value: 11500,
value_str: String::from("11.5"),
unit: Unit::Gigabyte,
},
TestCase {
string: String::from("12.5Pb"),
value: 12500,
value_str: String::from("12.5"),
unit: Unit::Terabyte,
},
TestCase {
string: String::from("10.5kib"),
value: 10752,
value_str: String::from("10.5"),
unit: Unit::Byte,
},
TestCase {
string: String::from("0.5mib"),
value: 512,
value_str: String::from("0.5"),
unit: Unit::Kibibyte,
},
TestCase {
string: String::from("3.25gib"),
value: 3328,
value_str: String::from("3.25"),
unit: Unit::Mebibyte,
},
];
for case in cases.iter() {
let input_len = case.string.len();
let value_len = case.value_str.to_string().len();
let input = case.string.clone().spanned(Span::new(0, input_len));
let result = parse_filesize(&input);
assert_eq!(result.1, None);
assert_eq!(
result.0.expr,
Expression::unit(
Spanned {
span: Span::new(0, value_len),
item: case.value
},
Spanned {
span: Span::new(value_len, input_len),
item: case.unit
}
)
);
}
}
| 34.276929 | 154 | 0.484105 |
28c07e7157f561535e5c5fb9209cc816fa31c47b | 52,130 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! # Diagnostics data
//!
//! This library contians the Diagnostics data schema used for inspect, logs and lifecycle. This is
//! the data that the Archive returns on `fuchsia.diagnostics.ArchiveAccessor` reads.
use anyhow::format_err;
use fidl_fuchsia_diagnostics::{DataType, Severity as FidlSeverity};
use serde::{
self,
de::{DeserializeOwned, Deserializer},
Deserialize, Serialize, Serializer,
};
use std::{
borrow::Borrow,
cmp::Ordering,
fmt,
hash::Hash,
ops::{Deref, DerefMut},
str::FromStr,
time::Duration,
};
pub use diagnostics_hierarchy::{
assert_data_tree, hierarchy, tree_assertion, DiagnosticsHierarchy, Property,
};
#[cfg(target_os = "fuchsia")]
mod logs_legacy;
#[cfg(target_os = "fuchsia")]
pub use crate::logs_legacy::*;
const SCHEMA_VERSION: u64 = 1;
const MICROS_IN_SEC: u128 = 1000000;
/// The source of diagnostics data
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub enum DataSource {
Unknown,
Inspect,
LifecycleEvent,
Logs,
}
impl Default for DataSource {
fn default() -> Self {
DataSource::Unknown
}
}
/// The type of a lifecycle event exposed by the `fuchsia.diagnostics.ArchiveAccessor`
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub enum LifecycleType {
Started,
Stopped,
DiagnosticsReady,
LogSinkConnected,
}
/// Metadata contained in a `DiagnosticsData` object.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum Metadata {
Empty,
Inspect(InspectMetadata),
LifecycleEvent(LifecycleEventMetadata),
Logs(LogsMetadata),
}
impl Default for Metadata {
fn default() -> Self {
Metadata::Empty
}
}
/// A trait implemented by marker types which denote "kinds" of diagnostics data.
pub trait DiagnosticsData {
/// The type of metadata included in results of this type.
type Metadata: DeserializeOwned + Serialize + Clone + Send + 'static;
/// The type of key used for indexing node hierarchies in the payload.
type Key: AsRef<str> + Clone + DeserializeOwned + Eq + FromStr + Hash + Send + 'static;
/// The type of error returned in this metadata.
type Error: Clone;
/// Used to query for this kind of metadata in the ArchiveAccessor.
const DATA_TYPE: DataType;
/// Returns the component URL which generated this value.
fn component_url(metadata: &Self::Metadata) -> Option<&str>;
/// Returns the timestamp at which this value was recorded.
fn timestamp(metadata: &Self::Metadata) -> Timestamp;
/// Returns the errors recorded with this value, if any.
fn errors(metadata: &Self::Metadata) -> &Option<Vec<Self::Error>>;
/// Returns whether any errors are recorded on this value.
fn has_errors(metadata: &Self::Metadata) -> bool {
Self::errors(metadata).as_ref().map(|e| !e.is_empty()).unwrap_or_default()
}
/// Transforms a Metdata string into a errorful metadata, overriding any other
/// errors.
fn override_error(metadata: Self::Metadata, error: String) -> Self::Metadata;
}
/// Lifecycle events track the start, stop, and diagnostics directory readiness of components.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct Lifecycle;
impl DiagnosticsData for Lifecycle {
type Metadata = LifecycleEventMetadata;
type Key = String;
type Error = LifecycleError;
const DATA_TYPE: DataType = DataType::Lifecycle;
fn component_url(metadata: &Self::Metadata) -> Option<&str> {
metadata.component_url.as_ref().map(|s| s.as_str())
}
fn timestamp(metadata: &Self::Metadata) -> Timestamp {
metadata.timestamp
}
fn errors(metadata: &Self::Metadata) -> &Option<Vec<Self::Error>> {
&metadata.errors
}
fn override_error(metadata: Self::Metadata, error: String) -> Self::Metadata {
LifecycleEventMetadata {
lifecycle_event_type: metadata.lifecycle_event_type,
component_url: metadata.component_url,
timestamp: metadata.timestamp,
errors: Some(vec![LifecycleError { message: error.into() }]),
}
}
}
/// Inspect carries snapshots of data trees hosted by components.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct Inspect;
impl DiagnosticsData for Inspect {
type Metadata = InspectMetadata;
type Key = String;
type Error = InspectError;
const DATA_TYPE: DataType = DataType::Inspect;
fn component_url(metadata: &Self::Metadata) -> Option<&str> {
metadata.component_url.as_ref().map(|s| s.as_str())
}
fn timestamp(metadata: &Self::Metadata) -> Timestamp {
metadata.timestamp
}
fn errors(metadata: &Self::Metadata) -> &Option<Vec<Self::Error>> {
&metadata.errors
}
fn override_error(metadata: Self::Metadata, error: String) -> Self::Metadata {
InspectMetadata {
filename: metadata.filename,
component_url: metadata.component_url,
timestamp: metadata.timestamp,
errors: Some(vec![InspectError { message: error.into() }]),
}
}
}
/// Logs carry streams of structured events from components.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct Logs;
impl DiagnosticsData for Logs {
type Metadata = LogsMetadata;
type Key = LogsField;
type Error = LogError;
const DATA_TYPE: DataType = DataType::Logs;
fn component_url(metadata: &Self::Metadata) -> Option<&str> {
metadata.component_url.as_ref().map(|s| s.as_str())
}
fn timestamp(metadata: &Self::Metadata) -> Timestamp {
metadata.timestamp
}
fn errors(metadata: &Self::Metadata) -> &Option<Vec<Self::Error>> {
&metadata.errors
}
fn override_error(metadata: Self::Metadata, error: String) -> Self::Metadata {
LogsMetadata {
severity: metadata.severity,
component_url: metadata.component_url,
timestamp: metadata.timestamp,
errors: Some(vec![LogError::Other { message: error }]),
file: metadata.file,
line: metadata.line,
pid: metadata.pid,
tags: metadata.tags,
tid: metadata.tid,
dropped: None,
size_bytes: None,
}
}
}
/// Wraps a time for serialization and deserialization purposes.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Timestamp(i64);
impl fmt::Display for Timestamp {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
// i32 here because it's the default for a bare integer literal w/o a type suffix
impl From<i32> for Timestamp {
fn from(nanos: i32) -> Timestamp {
Timestamp(nanos as i64)
}
}
impl From<i64> for Timestamp {
fn from(nanos: i64) -> Timestamp {
Timestamp(nanos)
}
}
impl Into<i64> for Timestamp {
fn into(self) -> i64 {
self.0
}
}
impl Into<Duration> for Timestamp {
fn into(self) -> Duration {
Duration::from_nanos(self.0 as u64)
}
}
#[cfg(target_os = "fuchsia")]
mod zircon {
use super::*;
use fuchsia_zircon as zx;
impl From<zx::Time> for Timestamp {
fn from(t: zx::Time) -> Timestamp {
Timestamp(t.into_nanos())
}
}
impl Into<zx::Time> for Timestamp {
fn into(self) -> zx::Time {
zx::Time::from_nanos(self.0)
}
}
}
impl Deref for Timestamp {
type Target = i64;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Timestamp {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// The metadata contained in a `DiagnosticsData` object where the data source is
/// `DataSource::LifecycleEvent`.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct LifecycleEventMetadata {
/// Optional vector of errors encountered by platform.
#[serde(skip_serializing_if = "Option::is_none")]
pub errors: Option<Vec<LifecycleError>>,
/// Type of lifecycle event being encoded in the payload.
pub lifecycle_event_type: LifecycleType,
/// The url with which the component was launched.
#[serde(skip_serializing_if = "Option::is_none")]
pub component_url: Option<String>,
/// Monotonic time in nanos.
pub timestamp: Timestamp,
}
/// The metadata contained in a `DiagnosticsData` object where the data source is
/// `DataSource::Inspect`.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct InspectMetadata {
/// Optional vector of errors encountered by platform.
#[serde(skip_serializing_if = "Option::is_none")]
pub errors: Option<Vec<InspectError>>,
/// Name of diagnostics file producing data.
pub filename: String,
/// The url with which the component was launched.
#[serde(skip_serializing_if = "Option::is_none")]
pub component_url: Option<String>,
/// Monotonic time in nanos.
pub timestamp: Timestamp,
}
/// The metadata contained in a `DiagnosticsData` object where the data source is
/// `DataSource::Logs`.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct LogsMetadata {
// TODO(fxbug.dev/58369) figure out exact spelling of pid/tid context and severity
/// Optional vector of errors encountered by platform.
#[serde(skip_serializing_if = "Option::is_none")]
pub errors: Option<Vec<LogError>>,
/// The url with which the component was launched.
#[serde(skip_serializing_if = "Option::is_none")]
pub component_url: Option<String>,
/// Monotonic time in nanos.
pub timestamp: Timestamp,
/// Severity of the message.
pub severity: Severity,
/// Tags to add at the beginning of the message
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<String>>,
/// The process ID
#[serde(skip_serializing_if = "Option::is_none")]
pub pid: Option<u64>,
/// The thread ID
#[serde(skip_serializing_if = "Option::is_none")]
pub tid: Option<u64>,
/// The file name
#[serde(skip_serializing_if = "Option::is_none")]
pub file: Option<String>,
/// The line number
#[serde(skip_serializing_if = "Option::is_none")]
pub line: Option<u64>,
/// Number of dropped messages
/// DEPRECATED: do not set. Left for backwards compatibility with older serialized metadatas
/// that contain this field.
#[serde(skip)]
dropped: Option<u64>,
/// Size of the original message on the wire, in bytes.
/// DEPRECATED: do not set. Left for backwards compatibility with older serialized metadatas
/// that contain this field.
#[serde(skip)]
size_bytes: Option<usize>,
}
/// Severities a log message can have, often called the log's "level".
// NOTE: this is only duplicated because we can't get Serialize/Deserialize on the FIDL type
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub enum Severity {
/// Trace records include detailed information about program execution.
#[serde(rename = "TRACE", alias = "Trace")]
Trace,
/// Debug records include development-facing information about program execution.
#[serde(rename = "DEBUG", alias = "Debug")]
Debug,
/// Info records include general information about program execution. (default)
#[serde(rename = "INFO", alias = "Info")]
Info,
/// Warning records include information about potentially problematic operations.
#[serde(rename = "WARN", alias = "Warn")]
Warn,
/// Error records include information about failed operations.
#[serde(rename = "ERROR", alias = "Error")]
Error,
/// Fatal records convey information about operations which cause a program's termination.
#[serde(rename = "FATAL", alias = "Fatal")]
Fatal,
}
impl fmt::Display for Severity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let repr = match self {
Severity::Trace => "TRACE",
Severity::Debug => "DEBUG",
Severity::Info => "INFO",
Severity::Warn => "WARN",
Severity::Error => "ERROR",
Severity::Fatal => "FATAL",
};
write!(f, "{}", repr)
}
}
impl FromStr for Severity {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.to_lowercase();
match s.as_str() {
"trace" => Ok(Severity::Trace),
"debug" => Ok(Severity::Debug),
"info" => Ok(Severity::Info),
"warn" => Ok(Severity::Warn),
"error" => Ok(Severity::Error),
"fatal" => Ok(Severity::Fatal),
other => Err(format_err!("invalid severity: {}", other)),
}
}
}
impl From<FidlSeverity> for Severity {
fn from(severity: FidlSeverity) -> Self {
match severity {
FidlSeverity::Trace => Severity::Trace,
FidlSeverity::Debug => Severity::Debug,
FidlSeverity::Info => Severity::Info,
FidlSeverity::Warn => Severity::Warn,
FidlSeverity::Error => Severity::Error,
FidlSeverity::Fatal => Severity::Fatal,
}
}
}
/// An instance of diagnostics data with typed metadata and an optional nested payload.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct Data<D: DiagnosticsData> {
/// The source of the data.
#[serde(default)]
// TODO(fxbug.dev/58033) remove this once the Metadata enum is gone everywhere
pub data_source: DataSource,
/// The metadata for the diagnostics payload.
#[serde(bound(
deserialize = "D::Metadata: DeserializeOwned",
serialize = "D::Metadata: Serialize"
))]
pub metadata: D::Metadata,
/// Moniker of the component that generated the payload.
pub moniker: String,
/// Payload containing diagnostics data, if the payload exists, else None.
pub payload: Option<DiagnosticsHierarchy<D::Key>>,
/// Schema version.
#[serde(default)]
pub version: u64,
}
impl<D> Data<D>
where
D: DiagnosticsData,
{
pub fn dropped_payload_schema(self, error_string: String) -> Data<D>
where
D: DiagnosticsData,
{
Data {
metadata: D::override_error(self.metadata, error_string),
moniker: self.moniker,
data_source: self.data_source,
version: self.version,
payload: None,
}
}
}
/// A diagnostics data object containing inspect data.
pub type InspectData = Data<Inspect>;
/// A diagnostics data object containing lifecycle event data.
pub type LifecycleData = Data<Lifecycle>;
/// A diagnostics data object containing logs data.
pub type LogsData = Data<Logs>;
/// A diagnostics data payload containing logs data.
pub type LogsHierarchy = DiagnosticsHierarchy<LogsField>;
/// A diagnostics hierarchy property keyed by `LogsField`.
pub type LogsProperty = Property<LogsField>;
impl Data<Lifecycle> {
/// Creates a new data instance for a lifecycle event.
pub fn for_lifecycle_event(
moniker: impl Into<String>,
lifecycle_event_type: LifecycleType,
payload: Option<DiagnosticsHierarchy>,
component_url: impl Into<String>,
timestamp: impl Into<Timestamp>,
errors: Vec<LifecycleError>,
) -> LifecycleData {
let errors_opt = if errors.is_empty() { None } else { Some(errors) };
Data {
moniker: moniker.into(),
version: SCHEMA_VERSION,
data_source: DataSource::LifecycleEvent,
payload,
metadata: LifecycleEventMetadata {
timestamp: timestamp.into(),
component_url: Some(component_url.into()),
lifecycle_event_type,
errors: errors_opt,
},
}
}
}
impl Data<Inspect> {
/// Creates a new data instance for inspect.
pub fn for_inspect(
moniker: impl Into<String>,
inspect_hierarchy: Option<DiagnosticsHierarchy>,
timestamp_nanos: impl Into<Timestamp>,
component_url: impl Into<String>,
filename: impl Into<String>,
errors: Vec<InspectError>,
) -> InspectData {
let errors_opt = if errors.is_empty() { None } else { Some(errors) };
Data {
moniker: moniker.into(),
version: SCHEMA_VERSION,
data_source: DataSource::Inspect,
payload: inspect_hierarchy,
metadata: InspectMetadata {
timestamp: timestamp_nanos.into(),
component_url: Some(component_url.into()),
filename: filename.into(),
errors: errors_opt,
},
}
}
}
/// Internal state of the LogsDataBuilder impl
/// External customers should not directly access these fields.
pub struct LogsDataBuilder {
/// List of errors
errors: Vec<LogError>,
/// Message in log
msg: Option<String>,
/// List of tags
tags: Vec<String>,
/// Process ID
pid: Option<u64>,
/// Thread ID
tid: Option<u64>,
/// File name
file: Option<String>,
/// Line number
line: Option<u64>,
/// BuilderArgs that was passed in at construction time
args: BuilderArgs,
/// List of KVPs from the user
keys: Vec<Property<LogsField>>,
/// Printf format string
format: Option<String>,
/// Arguments for printf string
printf_args: Vec<String>,
}
pub struct BuilderArgs {
/// The moniker for the component
pub moniker: String,
/// The timestamp of the message in nanoseconds
pub timestamp_nanos: Timestamp,
/// The component URL
pub component_url: Option<String>,
/// The message severity
pub severity: Severity,
}
impl LogsDataBuilder {
/// Constructs a new LogsDataBuilder
pub fn new(args: BuilderArgs) -> Self {
LogsDataBuilder {
args,
errors: vec![],
msg: None,
file: None,
line: None,
pid: None,
tags: vec![],
tid: None,
keys: vec![],
format: None,
printf_args: vec![],
}
}
/// Sets the number of dropped messages.
/// If value is greater than zero, a DroppedLogs error
/// will also be added to the list of errors or updated if
/// already present.
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_dropped(mut self, value: u64) -> Self {
#[allow(clippy::absurd_extreme_comparisons)] // TODO(fxbug.dev/95023)
if value <= 0 {
return self;
}
let val = self.errors.iter_mut().find_map(|error| {
if let LogError::DroppedLogs { count } = error {
Some(count)
} else {
None
}
});
if let Some(v) = val {
*v = value;
} else {
self.errors.push(LogError::DroppedLogs { count: value });
}
self
}
pub fn set_severity(mut self, severity: Severity) -> Self {
self.args.severity = severity;
self
}
/// Sets the process ID that logged the message
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_pid(mut self, value: u64) -> Self {
self.pid = Some(value);
self
}
/// Sets the thread ID that logged the message
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_tid(mut self, value: u64) -> Self {
self.tid = Some(value);
self
}
/// Constructs a LogsData from this builder
pub fn build(self) -> LogsData {
let mut args = vec![];
if let Some(msg) = self.msg {
args.push(LogsProperty::String(LogsField::MsgStructured, msg));
}
let mut payload_fields = vec![DiagnosticsHierarchy::new("message", args, vec![])];
if !self.keys.is_empty() {
let val = DiagnosticsHierarchy::new("keys", self.keys, vec![]);
payload_fields.push(val);
}
if let Some(format) = self.format {
let val = DiagnosticsHierarchy::new(
"printf".to_string(),
vec![
LogsProperty::String(LogsField::Other("format".to_string()), format),
LogsProperty::StringList(
LogsField::Other("args".to_string()),
self.printf_args,
),
],
vec![],
);
payload_fields.push(val);
}
let mut payload = LogsHierarchy::new("root", vec![], payload_fields);
payload.sort();
let mut ret = LogsData::for_logs(
self.args.moniker,
Some(payload),
self.args.timestamp_nanos,
self.args.component_url,
self.args.severity,
self.errors,
);
ret.metadata.file = self.file;
ret.metadata.line = self.line;
ret.metadata.pid = self.pid;
ret.metadata.tid = self.tid;
ret.metadata.tags = Some(self.tags);
return ret;
}
/// Adds an error
#[must_use = "You must call build on your builder to consume its result"]
pub fn add_error(mut self, error: LogError) -> Self {
self.errors.push(error);
self
}
/// Sets the message to be printed in the log message
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_message(mut self, msg: impl Into<String>) -> Self {
self.msg = Some(msg.into());
self
}
/// Sets the printf format and arguments.
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_format_printf(mut self, format: impl Into<String>, args: Vec<String>) -> Self {
self.format = Some(format.into());
self.printf_args = args;
self
}
/// Sets the file name that printed this message.
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_file(mut self, file: impl Into<String>) -> Self {
self.file = Some(file.into());
self
}
/// Sets the line number that printed this message.
#[must_use = "You must call build on your builder to consume its result"]
pub fn set_line(mut self, line: u64) -> Self {
self.line = Some(line);
self
}
/// Adds a property to the list of key value pairs that are a part of this log message.
#[must_use = "You must call build on your builder to consume its result"]
pub fn add_key(mut self, kvp: Property<LogsField>) -> Self {
self.keys.push(kvp);
self
}
/// Adds a tag to the list of tags that precede this log message.
#[must_use = "You must call build on your builder to consume its result"]
pub fn add_tag(mut self, tag: impl Into<String>) -> Self {
self.tags.push(tag.into());
self
}
}
impl Data<Logs> {
/// Creates a new data instance for logs.
pub fn for_logs(
moniker: impl Into<String>,
payload: Option<LogsHierarchy>,
timestamp_nanos: impl Into<Timestamp>,
component_url: Option<String>,
severity: impl Into<Severity>,
errors: Vec<LogError>,
) -> Self {
let errors = if errors.is_empty() { None } else { Some(errors) };
Data {
moniker: moniker.into(),
version: SCHEMA_VERSION,
data_source: DataSource::Logs,
payload,
metadata: LogsMetadata {
timestamp: timestamp_nanos.into(),
component_url: component_url,
severity: severity.into(),
errors,
file: None,
line: None,
pid: None,
tags: None,
tid: None,
dropped: None,
size_bytes: None,
},
}
}
/// Returns the string log associated with the message, if one exists.
pub fn msg(&self) -> Option<&str> {
self.payload_message().as_ref().and_then(|p| {
p.properties.iter().find_map(|property| match property {
LogsProperty::String(LogsField::MsgStructured, msg) => Some(msg.as_str()),
_ => None,
})
})
}
pub fn msg_mut(&mut self) -> Option<&mut String> {
self.payload_message_mut().and_then(|p| {
p.properties.iter_mut().find_map(|property| match property {
LogsProperty::String(LogsField::MsgStructured, msg) => Some(msg),
_ => None,
})
})
}
pub fn payload_printf_format(&mut self) -> Option<&str> {
self.payload_printf().as_ref().and_then(|p| {
p.properties.iter().find_map(|property| match property {
LogsProperty::String(LogsField::Format, format) => Some(format.as_str()),
_ => None,
})
})
}
pub fn payload_printf_args(&mut self) -> Option<&Vec<String>> {
self.payload_printf().as_ref().and_then(|p| {
p.properties.iter().find_map(|property| match property {
LogsProperty::StringList(LogsField::Args, format) => Some(format),
_ => None,
})
})
}
pub fn payload_printf(&self) -> Option<&DiagnosticsHierarchy<LogsField>> {
self.payload
.as_ref()
.and_then(|p| p.children.iter().find(|property| property.name.as_str() == "printf"))
}
pub fn payload_message(&self) -> Option<&DiagnosticsHierarchy<LogsField>> {
self.payload
.as_ref()
.and_then(|p| p.children.iter().find(|property| property.name.as_str() == "message"))
}
pub fn payload_keys(&self) -> Option<&DiagnosticsHierarchy<LogsField>> {
self.payload
.as_ref()
.and_then(|p| p.children.iter().find(|property| property.name.as_str() == "keys"))
}
/// Returns an iterator over the payload keys as strings with the format "key=value".
pub fn payload_keys_strings(&self) -> Box<dyn Iterator<Item = String> + '_> {
let maybe_iter = self.payload_keys().map(|p| {
Box::new(p.properties.iter().filter_map(|property| match property {
LogsProperty::String(LogsField::Tag, _tag) => None,
LogsProperty::String(LogsField::ProcessId, _tag) => None,
LogsProperty::String(LogsField::ThreadId, _tag) => None,
LogsProperty::String(LogsField::Verbosity, _tag) => None,
LogsProperty::String(LogsField::Dropped, _tag) => None,
LogsProperty::String(LogsField::Msg, _tag) => None,
LogsProperty::String(LogsField::FilePath, _tag) => None,
LogsProperty::String(LogsField::LineNumber, _tag) => None,
LogsProperty::String(LogsField::Other(key), value) => {
Some(format!("{}={}", key.to_string(), value))
}
LogsProperty::Bytes(LogsField::Other(key), _) => {
Some(format!("{} = <bytes>", key.to_string()))
}
LogsProperty::Int(LogsField::Other(key), value) => {
Some(format!("{}={}", key.to_string(), value))
}
LogsProperty::Uint(LogsField::Other(key), value) => {
Some(format!("{}={}", key.to_string(), value))
}
LogsProperty::Double(LogsField::Other(key), value) => {
Some(format!("{}={}", key.to_string(), value))
}
LogsProperty::Bool(LogsField::Other(key), value) => {
Some(format!("{}={}", key.to_string(), value))
}
LogsProperty::DoubleArray(LogsField::Other(key), value) => {
Some(format!("{}={:?}", key.to_string(), value))
}
LogsProperty::IntArray(LogsField::Other(key), value) => {
Some(format!("{}={:?}", key.to_string(), value))
}
LogsProperty::UintArray(LogsField::Other(key), value) => {
Some(format!("{}={:?}", key.to_string(), value))
}
LogsProperty::StringList(LogsField::Other(key), value) => {
Some(format!("{}={:?}", key.to_string(), value))
}
_ => None,
}))
});
match maybe_iter {
Some(i) => Box::new(i),
None => Box::new(std::iter::empty()),
}
}
pub fn payload_message_mut(&mut self) -> Option<&mut DiagnosticsHierarchy<LogsField>> {
self.payload.as_mut().and_then(|p| {
p.children.iter_mut().find(|property| property.name.as_str() == "message")
})
}
/// Returns the file path associated with the message, if one exists.
pub fn file_path(&self) -> Option<&str> {
self.metadata.file.as_ref().map(|file| file.as_str())
}
/// Returns the line number associated with the message, if one exists.
pub fn line_number(&self) -> Option<&u64> {
self.metadata.line.as_ref()
}
/// Returns the pid associated with the message, if one exists.
pub fn pid(&self) -> Option<u64> {
self.metadata.pid
}
/// Returns the tid associated with the message, if one exists.
pub fn tid(&self) -> Option<u64> {
self.metadata.tid
}
/// Returns the tags associated with the message, if any exist.
pub fn tags(&self) -> Option<&Vec<String>> {
self.metadata.tags.as_ref()
}
/// The message's severity.
#[cfg(target_os = "fuchsia")]
pub fn legacy_severity(&self) -> LegacySeverity {
if let Some(verbosity) = self.verbosity() {
LegacySeverity::Verbose(verbosity)
} else {
match self.metadata.severity {
Severity::Trace => LegacySeverity::Trace,
Severity::Debug => LegacySeverity::Debug,
Severity::Info => LegacySeverity::Info,
Severity::Warn => LegacySeverity::Warn,
Severity::Error => LegacySeverity::Error,
Severity::Fatal => LegacySeverity::Fatal,
}
}
}
/// Returns number of dropped logs if reported in the message.
pub fn dropped_logs(&self) -> Option<u64> {
self.metadata
.errors
.as_ref()
.map(|errors| {
errors.iter().find_map(|e| match e {
LogError::DroppedLogs { count } => Some(*count),
_ => None,
})
})
.flatten()
}
/// Returns number of rolled out logs if reported in the message.
pub fn rolled_out_logs(&self) -> Option<u64> {
self.metadata
.errors
.as_ref()
.map(|errors| {
errors.iter().find_map(|e| match e {
LogError::RolledOutLogs { count } => Some(*count),
_ => None,
})
})
.flatten()
}
pub fn verbosity(&self) -> Option<i8> {
self.payload_message().and_then(|payload| {
payload
.properties
.iter()
.filter_map(|property| match property {
LogsProperty::Int(LogsField::Verbosity, verbosity) => Some(*verbosity as i8),
_ => None,
})
.next()
})
}
pub fn set_legacy_verbosity(&mut self, legacy: i8) {
if let Some(payload_message) = self.payload_message_mut() {
payload_message.properties.push(LogsProperty::Int(LogsField::Verbosity, legacy.into()));
}
}
#[cfg(target_os = "fuchsia")]
pub(crate) fn non_legacy_contents(&self) -> Box<dyn Iterator<Item = &LogsProperty> + '_> {
match self.payload_keys() {
None => Box::new(std::iter::empty()),
Some(payload) => Box::new(payload.properties.iter()),
}
}
/// Returns the component nam. This only makes sense for v1 components.
pub fn component_name(&self) -> &str {
self.moniker.rsplit("/").next().unwrap_or("UNKNOWN")
}
}
impl fmt::Display for Data<Logs> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Multiple tags are supported for the `LogMessage` format and are represented
// as multiple instances of LogsField::Tag arguments.
let kvps = self.payload_keys_strings();
let time: Duration = self.metadata.timestamp.into();
write!(
f,
"[{:05}.{:06}][{}][{}][{}]",
time.as_secs(),
time.as_micros() % MICROS_IN_SEC,
self.pid().map(|s| s.to_string()).unwrap_or("".to_string()),
self.tid().map(|s| s.to_string()).unwrap_or("".to_string()),
self.moniker,
)?;
match &self.metadata.tags {
Some(tags) if !tags.is_empty() => {
write!(f, "[{}]", tags.join(","))?;
}
_ => {}
}
write!(f, " {}:", self.metadata.severity)?;
if let (Some(file), Some(line)) = (&self.metadata.file, &self.metadata.line) {
write!(f, " [{}({})]", file, line)?;
}
write!(f, " {}", self.msg().unwrap_or(""))?;
for kvp in kvps {
write!(f, " {}", kvp)?;
}
Ok(())
}
}
impl Eq for Data<Logs> {}
impl PartialOrd for Data<Logs> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Data<Logs> {
fn cmp(&self, other: &Self) -> Ordering {
self.metadata.timestamp.cmp(&other.metadata.timestamp)
}
}
/// An enum containing well known argument names passed through logs, as well
/// as an `Other` variant for any other argument names.
///
/// This contains the fields of logs sent as a [`LogMessage`].
///
/// [`LogMessage`]: https://fuchsia.dev/reference/fidl/fuchsia.logger#LogMessage
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize)]
pub enum LogsField {
ProcessId,
ThreadId,
Dropped,
Tag,
Verbosity,
Msg,
MsgStructured,
FilePath,
LineNumber,
Args,
Format,
Other(String),
}
// TODO(fxbug.dev/50519) - ensure that strings reported here align with naming
// decisions made for the structured log format sent by other components.
pub const PID_LABEL: &str = "pid";
pub const TID_LABEL: &str = "tid";
pub const DROPPED_LABEL: &str = "num_dropped";
pub const TAG_LABEL: &str = "tag";
pub const MESSAGE_LABEL_STRUCTURED: &str = "value";
pub const MESSAGE_LABEL: &str = "message";
pub const FORMAT_LABEL: &str = "format";
pub const ARGS_LABEL: &str = "args";
pub const VERBOSITY_LABEL: &str = "verbosity";
pub const FILE_PATH_LABEL: &str = "file";
pub const LINE_NUMBER_LABEL: &str = "line";
impl LogsField {
/// Whether the logs field is legacy or not.
pub fn is_legacy(&self) -> bool {
matches!(
self,
LogsField::ProcessId
| LogsField::ThreadId
| LogsField::Dropped
| LogsField::Tag
| LogsField::Msg
| LogsField::Verbosity
)
}
}
impl AsRef<str> for LogsField {
fn as_ref(&self) -> &str {
match self {
Self::ProcessId => PID_LABEL,
Self::ThreadId => TID_LABEL,
Self::Dropped => DROPPED_LABEL,
Self::Tag => TAG_LABEL,
Self::Msg => MESSAGE_LABEL,
Self::Verbosity => VERBOSITY_LABEL,
Self::FilePath => FILE_PATH_LABEL,
Self::LineNumber => LINE_NUMBER_LABEL,
Self::MsgStructured => MESSAGE_LABEL_STRUCTURED,
Self::Args => MESSAGE_LABEL,
Self::Format => FORMAT_LABEL,
Self::Other(str) => str.as_str(),
}
}
}
impl<T> From<T> for LogsField
where
// Deref instead of AsRef b/c LogsField: AsRef<str> so this conflicts with concrete From<Self>
T: Deref<Target = str>,
{
fn from(s: T) -> Self {
match s.as_ref() {
PID_LABEL => Self::ProcessId,
TID_LABEL => Self::ThreadId,
DROPPED_LABEL => Self::Dropped,
VERBOSITY_LABEL => Self::Verbosity,
TAG_LABEL => Self::Tag,
MESSAGE_LABEL => Self::Msg,
FILE_PATH_LABEL => Self::FilePath,
LINE_NUMBER_LABEL => Self::LineNumber,
MESSAGE_LABEL_STRUCTURED => Self::MsgStructured,
FORMAT_LABEL => Self::Format,
ARGS_LABEL => Self::Args,
_ => Self::Other(s.to_string()),
}
}
}
impl FromStr for LogsField {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self::from(s))
}
}
/// Possible errors that can come in a `DiagnosticsData` object where the data source is
/// `DataSource::Logs`.
#[derive(Clone, Deserialize, Debug, Eq, PartialEq, Serialize)]
pub enum LogError {
/// Represents the number of logs that were dropped by the component writing the logs due to an
/// error writing to the socket before succeeding to write a log.
#[serde(rename = "dropped_logs")]
DroppedLogs { count: u64 },
/// Represents the number of logs that were dropped for a component by the archivist due to the
/// log buffer execeeding its maximum capacity before the current message.
#[serde(rename = "rolled_out_logs")]
RolledOutLogs { count: u64 },
#[serde(rename = "parse_record")]
FailedToParseRecord(String),
#[serde(rename = "other")]
Other { message: String },
}
/// Possible error that can come in a `DiagnosticsData` object where the data source is
/// `DataSource::Inspect`..
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct InspectError {
pub message: String,
}
/// Possible error that can come in a `DiagnosticsData` object where the data source is
/// `DataSource::LifecycleEvent`.
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct LifecycleError {
pub message: String,
}
impl fmt::Display for LifecycleError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl fmt::Display for InspectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl Borrow<str> for InspectError {
fn borrow(&self) -> &str {
&self.message
}
}
impl Serialize for LifecycleError {
fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> {
self.message.serialize(ser)
}
}
impl Borrow<str> for LifecycleError {
fn borrow(&self) -> &str {
&self.message
}
}
impl Serialize for InspectError {
fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> {
self.message.serialize(ser)
}
}
impl<'de> Deserialize<'de> for InspectError {
fn deserialize<D>(de: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let message = String::deserialize(de)?;
Ok(Self { message })
}
}
impl<'de> Deserialize<'de> for LifecycleError {
fn deserialize<D>(de: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let message = String::deserialize(de)?;
Ok(Self { message })
}
}
impl Metadata {
/// Returns the inspect metadata or None if the metadata contained is not for inspect.
pub fn inspect(&self) -> Option<&InspectMetadata> {
match self {
Metadata::Inspect(m) => Some(m),
_ => None,
}
}
/// Returns the lifecycle event metadata or None if the metadata contained is not for a
/// lifecycle event.
pub fn lifecycle_event(&self) -> Option<&LifecycleEventMetadata> {
match self {
Metadata::LifecycleEvent(m) => Some(m),
_ => None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use diagnostics_hierarchy::hierarchy;
use serde_json::json;
const TEST_URL: &'static str = "fuchsia-pkg://test";
#[fuchsia::test]
fn test_canonical_json_inspect_formatting() {
let mut hierarchy = hierarchy! {
root: {
x: "foo",
}
};
hierarchy.sort();
let json_schema = Data::for_inspect(
"a/b/c/d",
Some(hierarchy),
123456i64,
TEST_URL,
"test_file_plz_ignore.inspect",
Vec::new(),
);
let result_json =
serde_json::to_value(&json_schema).expect("serialization should succeed.");
let expected_json = json!({
"moniker": "a/b/c/d",
"version": 1,
"data_source": "Inspect",
"payload": {
"root": {
"x": "foo"
}
},
"metadata": {
"component_url": TEST_URL,
"filename": "test_file_plz_ignore.inspect",
"timestamp": 123456,
}
});
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn test_errorful_json_inspect_formatting() {
let json_schema = Data::for_inspect(
"a/b/c/d",
None,
123456i64,
TEST_URL,
"test_file_plz_ignore.inspect",
vec![InspectError { message: "too much fun being had.".to_string() }],
);
let result_json =
serde_json::to_value(&json_schema).expect("serialization should succeed.");
let expected_json = json!({
"moniker": "a/b/c/d",
"version": 1,
"data_source": "Inspect",
"payload": null,
"metadata": {
"component_url": TEST_URL,
"errors": ["too much fun being had."],
"filename": "test_file_plz_ignore.inspect",
"timestamp": 123456,
}
});
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn default_builder_test() {
let builder = LogsDataBuilder::new(BuilderArgs {
component_url: Some("url".to_string()),
moniker: String::from("moniker"),
severity: Severity::Info,
timestamp_nanos: 0.into(),
});
//let tree = builder.build();
let expected_json = json!({
"moniker": "moniker",
"version": 1,
"data_source": "Logs",
"payload": {
"root":
{
"message":{}
}
},
"metadata": {
"component_url": "url",
"severity": "INFO",
"tags": [],
"timestamp": 0,
}
});
let result_json =
serde_json::to_value(&builder.build()).expect("serialization should succeed.");
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn regular_message_test() {
let builder = LogsDataBuilder::new(BuilderArgs {
component_url: Some("url".to_string()),
moniker: String::from("moniker"),
severity: Severity::Info,
timestamp_nanos: 0.into(),
})
.set_message("app")
.set_file("test file.cc")
.set_line(420)
.set_pid(1001)
.set_tid(200)
.set_dropped(2)
.add_tag("You're")
.add_tag("IT!")
.add_key(LogsProperty::String(LogsField::Other("key".to_string()), "value".to_string()));
// TODO (http://fxbug.dev/77054): Convert to our custom DSL when possible.
let expected_json = json!({
"moniker": "moniker",
"version": 1,
"data_source": "Logs",
"payload": {
"root":
{
"keys":{
"key":"value"
},
"message":{
"value":"app"
}
}
},
"metadata": {
"errors": [],
"component_url": "url",
"errors": [{"dropped_logs":{"count":2}}],
"file": "test file.cc",
"line": 420,
"pid": 1001,
"severity": "INFO",
"tags": ["You're", "IT!"],
"tid": 200,
"timestamp": 0,
}
});
let result_json =
serde_json::to_value(&builder.build()).expect("serialization should succeed.");
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn printf_test() {
let builder = LogsDataBuilder::new(BuilderArgs {
component_url: Some("url".to_string()),
moniker: String::from("moniker"),
severity: Severity::Info,
timestamp_nanos: 0.into(),
})
.set_format_printf("app", vec!["some".to_string(), "arg".to_string()])
.set_file("test file.cc")
.set_line(420)
.set_pid(1001)
.set_tid(200)
.set_dropped(2)
.add_tag("You're")
.add_tag("IT!")
.add_key(LogsProperty::String(LogsField::Other("key".to_string()), "value".to_string()));
let expected_json = json!({
"moniker": "moniker",
"version": 1,
"data_source": "Logs",
"payload": {
"root":
{
"keys":{
"key":"value"
},
"printf":{
"args":["some", "arg"],
"format":"app"
},
"message":{
}
}
},
"metadata": {
"errors": [],
"component_url": "url",
"errors": [{"dropped_logs":{"count":2}}],
"file": "test file.cc",
"line": 420,
"pid": 1001,
"severity": "INFO",
"tags": ["You're", "IT!"],
"tid": 200,
"timestamp": 0,
}
});
let result_json =
serde_json::to_value(&builder.build()).expect("serialization should succeed.");
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn test_canonical_json_lifecycle_event_formatting() {
let json_schema = Data::for_lifecycle_event(
"a/b/c/d",
LifecycleType::DiagnosticsReady,
None,
TEST_URL,
123456i64,
Vec::new(),
);
let result_json =
serde_json::to_value(&json_schema).expect("serialization should succeed.");
let expected_json = json!({
"moniker": "a/b/c/d",
"version": 1,
"data_source": "LifecycleEvent",
"payload": null,
"metadata": {
"component_url": TEST_URL,
"lifecycle_event_type": "DiagnosticsReady",
"timestamp": 123456,
}
});
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn test_errorful_json_lifecycle_event_formatting() {
let json_schema = Data::for_lifecycle_event(
"a/b/c/d",
LifecycleType::DiagnosticsReady,
None,
TEST_URL,
123456i64,
vec![LifecycleError { message: "too much fun being had.".to_string() }],
);
let result_json =
serde_json::to_value(&json_schema).expect("serialization should succeed.");
let expected_json = json!({
"moniker": "a/b/c/d",
"version": 1,
"data_source": "LifecycleEvent",
"payload": null,
"metadata": {
"errors": ["too much fun being had."],
"lifecycle_event_type": "DiagnosticsReady",
"component_url": TEST_URL,
"timestamp": 123456,
}
});
pretty_assertions::assert_eq!(result_json, expected_json, "golden diff failed.");
}
#[fuchsia::test]
fn display_for_logs() {
let data = LogsDataBuilder::new(BuilderArgs {
timestamp_nanos: Timestamp::from(12345678000i64).into(),
component_url: Some(String::from("fake-url")),
moniker: String::from("moniker"),
severity: Severity::Info,
})
.set_pid(123)
.set_tid(456)
.set_message("some message".to_string())
.set_file("some_file.cc".to_string())
.set_line(420)
.add_tag("foo")
.add_tag("bar")
.add_key(LogsProperty::String(LogsField::Other("test".to_string()), "property".to_string()))
.build();
assert_eq!(
"[00012.345678][123][456][moniker][foo,bar] INFO: [some_file.cc(420)] some message test=property",
format!("{}", data)
)
}
#[fuchsia::test]
fn display_for_logs_no_tags() {
let data = LogsDataBuilder::new(BuilderArgs {
timestamp_nanos: Timestamp::from(12345678000i64).into(),
component_url: Some(String::from("fake-url")),
moniker: String::from("moniker"),
severity: Severity::Info,
})
.set_pid(123)
.set_tid(456)
.set_message("some message".to_string())
.build();
assert_eq!("[00012.345678][123][456][moniker] INFO: some message", format!("{}", data))
}
#[fuchsia::test]
fn size_bytes_deserialize_backwards_compatibility() {
let original_json = json!({
"moniker": "a/b",
"version": 1,
"data_source": "Logs",
"payload": {
"root": {
"message":{}
}
},
"metadata": {
"component_url": "url",
"severity": "INFO",
"tags": [],
"timestamp": 123,
}
});
let expected_data = LogsDataBuilder::new(BuilderArgs {
component_url: Some("url".to_string()),
moniker: String::from("a/b"),
severity: Severity::Info,
timestamp_nanos: 123.into(),
})
.build();
let original_data: LogsData = serde_json::from_value(original_json).unwrap();
assert_eq!(original_data, expected_data);
// We skip deserializing the size_bytes
assert_eq!(original_data.metadata.size_bytes, None);
}
#[fuchsia::test]
fn dropped_deserialize_backwards_compatibility() {
let original_json = json!({
"moniker": "a/b",
"version": 1,
"data_source": "Logs",
"payload": {
"root": {
"message":{}
}
},
"metadata": {
"dropped": 0,
"component_url": "url",
"severity": "INFO",
"tags": [],
"timestamp": 123,
}
});
let expected_data = LogsDataBuilder::new(BuilderArgs {
component_url: Some("url".to_string()),
moniker: String::from("a/b"),
severity: Severity::Info,
timestamp_nanos: 123.into(),
})
.build();
let original_data: LogsData = serde_json::from_value(original_json).unwrap();
assert_eq!(original_data, expected_data);
// We skip deserializing dropped
assert_eq!(original_data.metadata.dropped, None);
}
}
| 31.922841 | 110 | 0.569499 |
09b6c33408208fd1ee9e8d819db7dbce17bcbae4 | 774 | //! Code that is shared between the client and server examples.
//!
//! This is mostly just the Message type declarations.
use serde::{Deserialize, Serialize};
pub const ADDR_LOCAL: &str = "127.0.0.1:7777";
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
/// A message from a user
pub struct Msg {
pub from: String,
pub text: String,
}
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
/// The connection message.
pub struct Connection {
pub user: String,
}
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
/// The disconnection message.
pub struct Disconnect {
pub reason: String,
}
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
/// The response message.
pub enum Response {
Accepted,
Rejected(String),
}
| 22.764706 | 63 | 0.700258 |
877402d017b51268888a1a372f95b424641442e5 | 1,492 | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
rs_allocation gAllocInX;
float __attribute__((kernel)) testAtan2FloatFloatFloat(float inY, unsigned int x) {
float inX = rsGetElementAt_float(gAllocInX, x);
return atan2(inY, inX);
}
float2 __attribute__((kernel)) testAtan2Float2Float2Float2(float2 inY, unsigned int x) {
float2 inX = rsGetElementAt_float2(gAllocInX, x);
return atan2(inY, inX);
}
float3 __attribute__((kernel)) testAtan2Float3Float3Float3(float3 inY, unsigned int x) {
float3 inX = rsGetElementAt_float3(gAllocInX, x);
return atan2(inY, inX);
}
float4 __attribute__((kernel)) testAtan2Float4Float4Float4(float4 inY, unsigned int x) {
float4 inX = rsGetElementAt_float4(gAllocInX, x);
return atan2(inY, inX);
}
| 34.697674 | 88 | 0.746649 |
719512958acb0aafe2badfa0e87e8b19cbf2ff93 | 4,190 | //! Types for the *m.presence* event.
use js_int::UInt;
use ruma_events_macros::ruma_event;
use ruma_identifiers::UserId;
use serde::{Deserialize, Serialize};
ruma_event! {
/// Informs the client of a user's presence state change.
PresenceEvent {
kind: Event,
event_type: Presence,
fields: {
/// The unique identifier for the user associated with this event.
pub sender: UserId,
},
content: {
/// The current avatar URL for this user.
#[serde(skip_serializing_if = "Option::is_none")]
pub avatar_url: Option<String>,
/// Whether or not the user is currently active.
#[serde(skip_serializing_if = "Option::is_none")]
pub currently_active: Option<bool>,
/// The current display name for this user.
#[serde(skip_serializing_if = "Option::is_none")]
pub displayname: Option<String>,
/// The last time since this user performed some action, in milliseconds.
#[serde(skip_serializing_if = "Option::is_none")]
pub last_active_ago: Option<UInt>,
/// The presence state for this user.
pub presence: PresenceState,
/// An optional description to accompany the presence.
#[serde(skip_serializing_if = "Option::is_none")]
pub status_msg: Option<String>,
},
}
}
/// A description of a user's connectivity and availability for chat.
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub enum PresenceState {
/// Disconnected from the service.
#[serde(rename = "offline")]
Offline,
/// Connected to the service.
#[serde(rename = "online")]
Online,
/// Connected to the service but not available for chat.
#[serde(rename = "unavailable")]
Unavailable,
/// Additional variants may be added in the future and will not be considered breaking changes
/// to ruma-events.
#[doc(hidden)]
#[serde(skip)]
__Nonexhaustive,
}
impl_enum! {
PresenceState {
Offline => "offline",
Online => "online",
Unavailable => "unavailable",
}
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use js_int::UInt;
use ruma_identifiers::UserId;
use serde_json::to_string;
use super::{PresenceEvent, PresenceEventContent, PresenceState};
#[test]
fn serialization() {
let event = PresenceEvent {
content: PresenceEventContent {
avatar_url: Some("mxc://localhost:wefuiwegh8742w".to_string()),
currently_active: Some(false),
displayname: None,
last_active_ago: Some(UInt::try_from(2_478_593).unwrap()),
presence: PresenceState::Online,
status_msg: Some("Making cupcakes".to_string()),
},
sender: UserId::try_from("@example:localhost").unwrap(),
};
let json =
r#"{"content":{"avatar_url":"mxc://localhost:wefuiwegh8742w","currently_active":false,"last_active_ago":2478593,"presence":"online","status_msg":"Making cupcakes"},"sender":"@example:localhost","type":"m.presence"}"#;
assert_eq!(to_string(&event).unwrap(), json);
}
#[test]
fn deserialization() {
let event = PresenceEvent {
content: PresenceEventContent {
avatar_url: Some("mxc://localhost:wefuiwegh8742w".to_string()),
currently_active: Some(false),
displayname: None,
last_active_ago: Some(UInt::try_from(2_478_593).unwrap()),
presence: PresenceState::Online,
status_msg: Some("Making cupcakes".to_string()),
},
sender: UserId::try_from("@example:localhost").unwrap(),
};
let json =
r#"{"content":{"avatar_url":"mxc://localhost:wefuiwegh8742w","currently_active":false,"last_active_ago":2478593,"presence":"online","status_msg":"Making cupcakes"},"sender":"@example:localhost","type":"m.presence"}"#;
assert_eq!(json.parse::<PresenceEvent>().unwrap(), event);
}
}
| 33.790323 | 229 | 0.602148 |
0840815e240af2e5ec683a2fdd272939b79cd10c | 183,514 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
pub(crate) client: aws_smithy_client::Client<C, M, R>,
pub(crate) conf: crate::Config,
}
/// Client for Amazon CloudWatch Application Insights
///
/// Client for invoking operations on Amazon CloudWatch Application Insights. Each operation on Amazon CloudWatch Application Insights is a method on this
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
/// // create a shared configuration. This can be used & shared between multiple service clients.
/// let shared_config = aws_config::load_from_env().await;
/// let client = aws_sdk_applicationinsights::Client::new(&shared_config);
/// // invoke an operation
/// /* let rsp = client
/// .<operation_name>().
/// .<param>("some value")
/// .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_applicationinsights::config::Builder::from(&shared_config)
/// .retry_config(RetryConfig::disabled())
/// .build();
/// let client = aws_sdk_applicationinsights::Client::from_conf(config);
/// # }
#[derive(std::fmt::Debug)]
pub struct Client<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl<C, M, R> Client<C, M, R> {
/// Creates a client with the given service configuration.
pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Returns the client's configuration.
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl<C, M, R> Client<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Constructs a fluent builder for the [`CreateApplication`](crate::client::fluent_builders::CreateApplication) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::CreateApplication::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::CreateApplication::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`ops_center_enabled(bool)`](crate::client::fluent_builders::CreateApplication::ops_center_enabled) / [`set_ops_center_enabled(Option<bool>)`](crate::client::fluent_builders::CreateApplication::set_ops_center_enabled): <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
/// - [`cwe_monitor_enabled(bool)`](crate::client::fluent_builders::CreateApplication::cwe_monitor_enabled) / [`set_cwe_monitor_enabled(Option<bool>)`](crate::client::fluent_builders::CreateApplication::set_cwe_monitor_enabled): <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
/// - [`ops_item_sns_topic_arn(impl Into<String>)`](crate::client::fluent_builders::CreateApplication::ops_item_sns_topic_arn) / [`set_ops_item_sns_topic_arn(Option<String>)`](crate::client::fluent_builders::CreateApplication::set_ops_item_sns_topic_arn): <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem. </p>
/// - [`tags(Vec<Tag>)`](crate::client::fluent_builders::CreateApplication::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::CreateApplication::set_tags): <p>List of tags to add to the application. tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
/// - [`auto_config_enabled(bool)`](crate::client::fluent_builders::CreateApplication::auto_config_enabled) / [`set_auto_config_enabled(Option<bool>)`](crate::client::fluent_builders::CreateApplication::set_auto_config_enabled): (undocumented)
/// - [`auto_create(bool)`](crate::client::fluent_builders::CreateApplication::auto_create) / [`set_auto_create(Option<bool>)`](crate::client::fluent_builders::CreateApplication::set_auto_create): (undocumented)
/// - On success, responds with [`CreateApplicationOutput`](crate::output::CreateApplicationOutput) with field(s):
/// - [`application_info(Option<ApplicationInfo>)`](crate::output::CreateApplicationOutput::application_info): <p>Information about the application.</p>
/// - On failure, responds with [`SdkError<CreateApplicationError>`](crate::error::CreateApplicationError)
pub fn create_application(&self) -> fluent_builders::CreateApplication<C, M, R> {
fluent_builders::CreateApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`CreateComponent`](crate::client::fluent_builders::CreateComponent) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::CreateComponent::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::CreateComponent::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::CreateComponent::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::CreateComponent::set_component_name): <p>The name of the component.</p>
/// - [`resource_list(Vec<String>)`](crate::client::fluent_builders::CreateComponent::resource_list) / [`set_resource_list(Option<Vec<String>>)`](crate::client::fluent_builders::CreateComponent::set_resource_list): <p>The list of resource ARNs that belong to the component.</p>
/// - On success, responds with [`CreateComponentOutput`](crate::output::CreateComponentOutput)
/// - On failure, responds with [`SdkError<CreateComponentError>`](crate::error::CreateComponentError)
pub fn create_component(&self) -> fluent_builders::CreateComponent<C, M, R> {
fluent_builders::CreateComponent::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`CreateLogPattern`](crate::client::fluent_builders::CreateLogPattern) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::CreateLogPattern::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::CreateLogPattern::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`pattern_set_name(impl Into<String>)`](crate::client::fluent_builders::CreateLogPattern::pattern_set_name) / [`set_pattern_set_name(Option<String>)`](crate::client::fluent_builders::CreateLogPattern::set_pattern_set_name): <p>The name of the log pattern set.</p>
/// - [`pattern_name(impl Into<String>)`](crate::client::fluent_builders::CreateLogPattern::pattern_name) / [`set_pattern_name(Option<String>)`](crate::client::fluent_builders::CreateLogPattern::set_pattern_name): <p>The name of the log pattern.</p>
/// - [`pattern(impl Into<String>)`](crate::client::fluent_builders::CreateLogPattern::pattern) / [`set_pattern(Option<String>)`](crate::client::fluent_builders::CreateLogPattern::set_pattern): <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
/// - [`rank(i32)`](crate::client::fluent_builders::CreateLogPattern::rank) / [`set_rank(i32)`](crate::client::fluent_builders::CreateLogPattern::set_rank): <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
/// - On success, responds with [`CreateLogPatternOutput`](crate::output::CreateLogPatternOutput) with field(s):
/// - [`log_pattern(Option<LogPattern>)`](crate::output::CreateLogPatternOutput::log_pattern): <p>The successfully created log pattern.</p>
/// - [`resource_group_name(Option<String>)`](crate::output::CreateLogPatternOutput::resource_group_name): <p>The name of the resource group.</p>
/// - On failure, responds with [`SdkError<CreateLogPatternError>`](crate::error::CreateLogPatternError)
pub fn create_log_pattern(&self) -> fluent_builders::CreateLogPattern<C, M, R> {
fluent_builders::CreateLogPattern::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DeleteApplication`](crate::client::fluent_builders::DeleteApplication) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DeleteApplication::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DeleteApplication::set_resource_group_name): <p>The name of the resource group.</p>
/// - On success, responds with [`DeleteApplicationOutput`](crate::output::DeleteApplicationOutput)
/// - On failure, responds with [`SdkError<DeleteApplicationError>`](crate::error::DeleteApplicationError)
pub fn delete_application(&self) -> fluent_builders::DeleteApplication<C, M, R> {
fluent_builders::DeleteApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DeleteComponent`](crate::client::fluent_builders::DeleteComponent) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DeleteComponent::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DeleteComponent::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::DeleteComponent::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::DeleteComponent::set_component_name): <p>The name of the component.</p>
/// - On success, responds with [`DeleteComponentOutput`](crate::output::DeleteComponentOutput)
/// - On failure, responds with [`SdkError<DeleteComponentError>`](crate::error::DeleteComponentError)
pub fn delete_component(&self) -> fluent_builders::DeleteComponent<C, M, R> {
fluent_builders::DeleteComponent::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DeleteLogPattern`](crate::client::fluent_builders::DeleteLogPattern) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DeleteLogPattern::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DeleteLogPattern::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`pattern_set_name(impl Into<String>)`](crate::client::fluent_builders::DeleteLogPattern::pattern_set_name) / [`set_pattern_set_name(Option<String>)`](crate::client::fluent_builders::DeleteLogPattern::set_pattern_set_name): <p>The name of the log pattern set.</p>
/// - [`pattern_name(impl Into<String>)`](crate::client::fluent_builders::DeleteLogPattern::pattern_name) / [`set_pattern_name(Option<String>)`](crate::client::fluent_builders::DeleteLogPattern::set_pattern_name): <p>The name of the log pattern.</p>
/// - On success, responds with [`DeleteLogPatternOutput`](crate::output::DeleteLogPatternOutput)
/// - On failure, responds with [`SdkError<DeleteLogPatternError>`](crate::error::DeleteLogPatternError)
pub fn delete_log_pattern(&self) -> fluent_builders::DeleteLogPattern<C, M, R> {
fluent_builders::DeleteLogPattern::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeApplication`](crate::client::fluent_builders::DescribeApplication) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeApplication::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DescribeApplication::set_resource_group_name): <p>The name of the resource group.</p>
/// - On success, responds with [`DescribeApplicationOutput`](crate::output::DescribeApplicationOutput) with field(s):
/// - [`application_info(Option<ApplicationInfo>)`](crate::output::DescribeApplicationOutput::application_info): <p>Information about the application.</p>
/// - On failure, responds with [`SdkError<DescribeApplicationError>`](crate::error::DescribeApplicationError)
pub fn describe_application(&self) -> fluent_builders::DescribeApplication<C, M, R> {
fluent_builders::DescribeApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeComponent`](crate::client::fluent_builders::DescribeComponent) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponent::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DescribeComponent::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponent::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::DescribeComponent::set_component_name): <p>The name of the component.</p>
/// - On success, responds with [`DescribeComponentOutput`](crate::output::DescribeComponentOutput) with field(s):
/// - [`application_component(Option<ApplicationComponent>)`](crate::output::DescribeComponentOutput::application_component): <p>Describes a standalone resource or similarly grouped resources that the application is made up of.</p>
/// - [`resource_list(Option<Vec<String>>)`](crate::output::DescribeComponentOutput::resource_list): <p>The list of resource ARNs that belong to the component.</p>
/// - On failure, responds with [`SdkError<DescribeComponentError>`](crate::error::DescribeComponentError)
pub fn describe_component(&self) -> fluent_builders::DescribeComponent<C, M, R> {
fluent_builders::DescribeComponent::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeComponentConfiguration`](crate::client::fluent_builders::DescribeComponentConfiguration) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponentConfiguration::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DescribeComponentConfiguration::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponentConfiguration::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::DescribeComponentConfiguration::set_component_name): <p>The name of the component.</p>
/// - On success, responds with [`DescribeComponentConfigurationOutput`](crate::output::DescribeComponentConfigurationOutput) with field(s):
/// - [`monitor(Option<bool>)`](crate::output::DescribeComponentConfigurationOutput::monitor): <p>Indicates whether the application component is monitored.</p>
/// - [`tier(Option<Tier>)`](crate::output::DescribeComponentConfigurationOutput::tier): <p>The tier of the application component. Supported tiers include <code>DOT_NET_CORE</code>, <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code> </p>
/// - [`component_configuration(Option<String>)`](crate::output::DescribeComponentConfigurationOutput::component_configuration): <p>The configuration settings of the component. The value is the escaped JSON of the configuration.</p>
/// - On failure, responds with [`SdkError<DescribeComponentConfigurationError>`](crate::error::DescribeComponentConfigurationError)
pub fn describe_component_configuration(
&self,
) -> fluent_builders::DescribeComponentConfiguration<C, M, R> {
fluent_builders::DescribeComponentConfiguration::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeComponentConfigurationRecommendation`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::set_component_name): <p>The name of the component.</p>
/// - [`tier(Tier)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::tier) / [`set_tier(Option<Tier>)`](crate::client::fluent_builders::DescribeComponentConfigurationRecommendation::set_tier): <p>The tier of the application component. Supported tiers include <code>DOT_NET_CORE</code>, <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
/// - On success, responds with [`DescribeComponentConfigurationRecommendationOutput`](crate::output::DescribeComponentConfigurationRecommendationOutput) with field(s):
/// - [`component_configuration(Option<String>)`](crate::output::DescribeComponentConfigurationRecommendationOutput::component_configuration): <p>The recommended configuration settings of the component. The value is the escaped JSON of the configuration.</p>
/// - On failure, responds with [`SdkError<DescribeComponentConfigurationRecommendationError>`](crate::error::DescribeComponentConfigurationRecommendationError)
pub fn describe_component_configuration_recommendation(
&self,
) -> fluent_builders::DescribeComponentConfigurationRecommendation<C, M, R> {
fluent_builders::DescribeComponentConfigurationRecommendation::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeLogPattern`](crate::client::fluent_builders::DescribeLogPattern) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::DescribeLogPattern::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::DescribeLogPattern::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`pattern_set_name(impl Into<String>)`](crate::client::fluent_builders::DescribeLogPattern::pattern_set_name) / [`set_pattern_set_name(Option<String>)`](crate::client::fluent_builders::DescribeLogPattern::set_pattern_set_name): <p>The name of the log pattern set.</p>
/// - [`pattern_name(impl Into<String>)`](crate::client::fluent_builders::DescribeLogPattern::pattern_name) / [`set_pattern_name(Option<String>)`](crate::client::fluent_builders::DescribeLogPattern::set_pattern_name): <p>The name of the log pattern.</p>
/// - On success, responds with [`DescribeLogPatternOutput`](crate::output::DescribeLogPatternOutput) with field(s):
/// - [`resource_group_name(Option<String>)`](crate::output::DescribeLogPatternOutput::resource_group_name): <p>The name of the resource group.</p>
/// - [`log_pattern(Option<LogPattern>)`](crate::output::DescribeLogPatternOutput::log_pattern): <p>The successfully created log pattern.</p>
/// - On failure, responds with [`SdkError<DescribeLogPatternError>`](crate::error::DescribeLogPatternError)
pub fn describe_log_pattern(&self) -> fluent_builders::DescribeLogPattern<C, M, R> {
fluent_builders::DescribeLogPattern::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeObservation`](crate::client::fluent_builders::DescribeObservation) operation.
///
/// - The fluent builder is configurable:
/// - [`observation_id(impl Into<String>)`](crate::client::fluent_builders::DescribeObservation::observation_id) / [`set_observation_id(Option<String>)`](crate::client::fluent_builders::DescribeObservation::set_observation_id): <p>The ID of the observation.</p>
/// - On success, responds with [`DescribeObservationOutput`](crate::output::DescribeObservationOutput) with field(s):
/// - [`observation(Option<Observation>)`](crate::output::DescribeObservationOutput::observation): <p>Information about the observation.</p>
/// - On failure, responds with [`SdkError<DescribeObservationError>`](crate::error::DescribeObservationError)
pub fn describe_observation(&self) -> fluent_builders::DescribeObservation<C, M, R> {
fluent_builders::DescribeObservation::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeProblem`](crate::client::fluent_builders::DescribeProblem) operation.
///
/// - The fluent builder is configurable:
/// - [`problem_id(impl Into<String>)`](crate::client::fluent_builders::DescribeProblem::problem_id) / [`set_problem_id(Option<String>)`](crate::client::fluent_builders::DescribeProblem::set_problem_id): <p>The ID of the problem.</p>
/// - On success, responds with [`DescribeProblemOutput`](crate::output::DescribeProblemOutput) with field(s):
/// - [`problem(Option<Problem>)`](crate::output::DescribeProblemOutput::problem): <p>Information about the problem. </p>
/// - On failure, responds with [`SdkError<DescribeProblemError>`](crate::error::DescribeProblemError)
pub fn describe_problem(&self) -> fluent_builders::DescribeProblem<C, M, R> {
fluent_builders::DescribeProblem::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeProblemObservations`](crate::client::fluent_builders::DescribeProblemObservations) operation.
///
/// - The fluent builder is configurable:
/// - [`problem_id(impl Into<String>)`](crate::client::fluent_builders::DescribeProblemObservations::problem_id) / [`set_problem_id(Option<String>)`](crate::client::fluent_builders::DescribeProblemObservations::set_problem_id): <p>The ID of the problem.</p>
/// - On success, responds with [`DescribeProblemObservationsOutput`](crate::output::DescribeProblemObservationsOutput) with field(s):
/// - [`related_observations(Option<RelatedObservations>)`](crate::output::DescribeProblemObservationsOutput::related_observations): <p>Observations related to the problem.</p>
/// - On failure, responds with [`SdkError<DescribeProblemObservationsError>`](crate::error::DescribeProblemObservationsError)
pub fn describe_problem_observations(
&self,
) -> fluent_builders::DescribeProblemObservations<C, M, R> {
fluent_builders::DescribeProblemObservations::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListApplications`](crate::client::fluent_builders::ListApplications) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListApplications::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`max_results(i32)`](crate::client::fluent_builders::ListApplications::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListApplications::set_max_results): <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListApplications::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListApplications::set_next_token): <p>The token to request the next page of results.</p>
/// - On success, responds with [`ListApplicationsOutput`](crate::output::ListApplicationsOutput) with field(s):
/// - [`application_info_list(Option<Vec<ApplicationInfo>>)`](crate::output::ListApplicationsOutput::application_info_list): <p>The list of applications.</p>
/// - [`next_token(Option<String>)`](crate::output::ListApplicationsOutput::next_token): <p>The token used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return. </p>
/// - On failure, responds with [`SdkError<ListApplicationsError>`](crate::error::ListApplicationsError)
pub fn list_applications(&self) -> fluent_builders::ListApplications<C, M, R> {
fluent_builders::ListApplications::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListComponents`](crate::client::fluent_builders::ListComponents) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListComponents::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::ListComponents::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::ListComponents::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`max_results(i32)`](crate::client::fluent_builders::ListComponents::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListComponents::set_max_results): <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListComponents::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListComponents::set_next_token): <p>The token to request the next page of results.</p>
/// - On success, responds with [`ListComponentsOutput`](crate::output::ListComponentsOutput) with field(s):
/// - [`application_component_list(Option<Vec<ApplicationComponent>>)`](crate::output::ListComponentsOutput::application_component_list): <p>The list of application components.</p>
/// - [`next_token(Option<String>)`](crate::output::ListComponentsOutput::next_token): <p>The token to request the next page of results.</p>
/// - On failure, responds with [`SdkError<ListComponentsError>`](crate::error::ListComponentsError)
pub fn list_components(&self) -> fluent_builders::ListComponents<C, M, R> {
fluent_builders::ListComponents::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListConfigurationHistory`](crate::client::fluent_builders::ListConfigurationHistory) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListConfigurationHistory::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::ListConfigurationHistory::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::ListConfigurationHistory::set_resource_group_name): <p>Resource group to which the application belongs. </p>
/// - [`start_time(DateTime)`](crate::client::fluent_builders::ListConfigurationHistory::start_time) / [`set_start_time(Option<DateTime>)`](crate::client::fluent_builders::ListConfigurationHistory::set_start_time): <p>The start time of the event. </p>
/// - [`end_time(DateTime)`](crate::client::fluent_builders::ListConfigurationHistory::end_time) / [`set_end_time(Option<DateTime>)`](crate::client::fluent_builders::ListConfigurationHistory::set_end_time): <p>The end time of the event.</p>
/// - [`event_status(ConfigurationEventStatus)`](crate::client::fluent_builders::ListConfigurationHistory::event_status) / [`set_event_status(Option<ConfigurationEventStatus>)`](crate::client::fluent_builders::ListConfigurationHistory::set_event_status): <p>The status of the configuration update event. Possible values include INFO, WARN, and ERROR.</p>
/// - [`max_results(i32)`](crate::client::fluent_builders::ListConfigurationHistory::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListConfigurationHistory::set_max_results): <p> The maximum number of results returned by <code>ListConfigurationHistory</code> in paginated output. When this parameter is used, <code>ListConfigurationHistory</code> returns only <code>MaxResults</code> in a single page along with a <code>NextToken</code> response element. The remaining results of the initial request can be seen by sending another <code>ListConfigurationHistory</code> request with the returned <code>NextToken</code> value. If this parameter is not used, then <code>ListConfigurationHistory</code> returns all results. </p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListConfigurationHistory::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListConfigurationHistory::set_next_token): <p>The <code>NextToken</code> value returned from a previous paginated <code>ListConfigurationHistory</code> request where <code>MaxResults</code> was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the <code>NextToken</code> value. This value is <code>null</code> when there are no more results to return.</p>
/// - On success, responds with [`ListConfigurationHistoryOutput`](crate::output::ListConfigurationHistoryOutput) with field(s):
/// - [`event_list(Option<Vec<ConfigurationEvent>>)`](crate::output::ListConfigurationHistoryOutput::event_list): <p> The list of configuration events and their corresponding details. </p>
/// - [`next_token(Option<String>)`](crate::output::ListConfigurationHistoryOutput::next_token): <p>The <code>NextToken</code> value to include in a future <code>ListConfigurationHistory</code> request. When the results of a <code>ListConfigurationHistory</code> request exceed <code>MaxResults</code>, this value can be used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return.</p>
/// - On failure, responds with [`SdkError<ListConfigurationHistoryError>`](crate::error::ListConfigurationHistoryError)
pub fn list_configuration_history(&self) -> fluent_builders::ListConfigurationHistory<C, M, R> {
fluent_builders::ListConfigurationHistory::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListLogPatterns`](crate::client::fluent_builders::ListLogPatterns) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListLogPatterns::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::ListLogPatterns::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::ListLogPatterns::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`pattern_set_name(impl Into<String>)`](crate::client::fluent_builders::ListLogPatterns::pattern_set_name) / [`set_pattern_set_name(Option<String>)`](crate::client::fluent_builders::ListLogPatterns::set_pattern_set_name): <p>The name of the log pattern set.</p>
/// - [`max_results(i32)`](crate::client::fluent_builders::ListLogPatterns::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListLogPatterns::set_max_results): <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListLogPatterns::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListLogPatterns::set_next_token): <p>The token to request the next page of results.</p>
/// - On success, responds with [`ListLogPatternsOutput`](crate::output::ListLogPatternsOutput) with field(s):
/// - [`resource_group_name(Option<String>)`](crate::output::ListLogPatternsOutput::resource_group_name): <p>The name of the resource group.</p>
/// - [`log_patterns(Option<Vec<LogPattern>>)`](crate::output::ListLogPatternsOutput::log_patterns): <p>The list of log patterns.</p>
/// - [`next_token(Option<String>)`](crate::output::ListLogPatternsOutput::next_token): <p>The token used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return. </p>
/// - On failure, responds with [`SdkError<ListLogPatternsError>`](crate::error::ListLogPatternsError)
pub fn list_log_patterns(&self) -> fluent_builders::ListLogPatterns<C, M, R> {
fluent_builders::ListLogPatterns::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListLogPatternSets`](crate::client::fluent_builders::ListLogPatternSets) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListLogPatternSets::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::ListLogPatternSets::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::ListLogPatternSets::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`max_results(i32)`](crate::client::fluent_builders::ListLogPatternSets::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListLogPatternSets::set_max_results): <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListLogPatternSets::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListLogPatternSets::set_next_token): <p>The token to request the next page of results.</p>
/// - On success, responds with [`ListLogPatternSetsOutput`](crate::output::ListLogPatternSetsOutput) with field(s):
/// - [`resource_group_name(Option<String>)`](crate::output::ListLogPatternSetsOutput::resource_group_name): <p>The name of the resource group.</p>
/// - [`log_pattern_sets(Option<Vec<String>>)`](crate::output::ListLogPatternSetsOutput::log_pattern_sets): <p>The list of log pattern sets.</p>
/// - [`next_token(Option<String>)`](crate::output::ListLogPatternSetsOutput::next_token): <p>The token used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return. </p>
/// - On failure, responds with [`SdkError<ListLogPatternSetsError>`](crate::error::ListLogPatternSetsError)
pub fn list_log_pattern_sets(&self) -> fluent_builders::ListLogPatternSets<C, M, R> {
fluent_builders::ListLogPatternSets::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListProblems`](crate::client::fluent_builders::ListProblems) operation.
/// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListProblems::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::ListProblems::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::ListProblems::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`start_time(DateTime)`](crate::client::fluent_builders::ListProblems::start_time) / [`set_start_time(Option<DateTime>)`](crate::client::fluent_builders::ListProblems::set_start_time): <p>The time when the problem was detected, in epoch seconds. If you don't specify a time frame for the request, problems within the past seven days are returned.</p>
/// - [`end_time(DateTime)`](crate::client::fluent_builders::ListProblems::end_time) / [`set_end_time(Option<DateTime>)`](crate::client::fluent_builders::ListProblems::set_end_time): <p>The time when the problem ended, in epoch seconds. If not specified, problems within the past seven days are returned.</p>
/// - [`max_results(i32)`](crate::client::fluent_builders::ListProblems::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::ListProblems::set_max_results): <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListProblems::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListProblems::set_next_token): <p>The token to request the next page of results.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::ListProblems::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::ListProblems::set_component_name): (undocumented)
/// - On success, responds with [`ListProblemsOutput`](crate::output::ListProblemsOutput) with field(s):
/// - [`problem_list(Option<Vec<Problem>>)`](crate::output::ListProblemsOutput::problem_list): <p>The list of problems. </p>
/// - [`next_token(Option<String>)`](crate::output::ListProblemsOutput::next_token): <p>The token used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return. </p>
/// - [`resource_group_name(Option<String>)`](crate::output::ListProblemsOutput::resource_group_name): (undocumented)
/// - On failure, responds with [`SdkError<ListProblemsError>`](crate::error::ListProblemsError)
pub fn list_problems(&self) -> fluent_builders::ListProblems<C, M, R> {
fluent_builders::ListProblems::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::ListTagsForResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::ListTagsForResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the application that you want to retrieve tag information for.</p>
/// - On success, responds with [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) with field(s):
/// - [`tags(Option<Vec<Tag>>)`](crate::output::ListTagsForResourceOutput::tags): <p>An array that lists all the tags that are associated with the application. Each tag consists of a required tag key (<code>Key</code>) and an associated tag value (<code>Value</code>).</p>
/// - On failure, responds with [`SdkError<ListTagsForResourceError>`](crate::error::ListTagsForResourceError)
pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C, M, R> {
fluent_builders::ListTagsForResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`TagResource`](crate::client::fluent_builders::TagResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::TagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::TagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the application that you want to add one or more tags to.</p>
/// - [`tags(Vec<Tag>)`](crate::client::fluent_builders::TagResource::tags) / [`set_tags(Option<Vec<Tag>>)`](crate::client::fluent_builders::TagResource::set_tags): <p>A list of tags that to add to the application. A tag consists of a required tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
/// - On success, responds with [`TagResourceOutput`](crate::output::TagResourceOutput)
/// - On failure, responds with [`SdkError<TagResourceError>`](crate::error::TagResourceError)
pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> {
fluent_builders::TagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UntagResource`](crate::client::fluent_builders::UntagResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::UntagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::UntagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the application that you want to remove one or more tags from.</p>
/// - [`tag_keys(Vec<String>)`](crate::client::fluent_builders::UntagResource::tag_keys) / [`set_tag_keys(Option<Vec<String>>)`](crate::client::fluent_builders::UntagResource::set_tag_keys): <p>The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.</p> <p>To remove more than one tag from the application, append the <code>TagKeys</code> parameter and argument for each additional tag to remove, separated by an ampersand. </p>
/// - On success, responds with [`UntagResourceOutput`](crate::output::UntagResourceOutput)
/// - On failure, responds with [`SdkError<UntagResourceError>`](crate::error::UntagResourceError)
pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> {
fluent_builders::UntagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateApplication`](crate::client::fluent_builders::UpdateApplication) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::UpdateApplication::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::UpdateApplication::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`ops_center_enabled(bool)`](crate::client::fluent_builders::UpdateApplication::ops_center_enabled) / [`set_ops_center_enabled(Option<bool>)`](crate::client::fluent_builders::UpdateApplication::set_ops_center_enabled): <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
/// - [`cwe_monitor_enabled(bool)`](crate::client::fluent_builders::UpdateApplication::cwe_monitor_enabled) / [`set_cwe_monitor_enabled(Option<bool>)`](crate::client::fluent_builders::UpdateApplication::set_cwe_monitor_enabled): <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
/// - [`ops_item_sns_topic_arn(impl Into<String>)`](crate::client::fluent_builders::UpdateApplication::ops_item_sns_topic_arn) / [`set_ops_item_sns_topic_arn(Option<String>)`](crate::client::fluent_builders::UpdateApplication::set_ops_item_sns_topic_arn): <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem.</p>
/// - [`remove_sns_topic(bool)`](crate::client::fluent_builders::UpdateApplication::remove_sns_topic) / [`set_remove_sns_topic(Option<bool>)`](crate::client::fluent_builders::UpdateApplication::set_remove_sns_topic): <p> Disassociates the SNS topic from the opsItem created for detected problems.</p>
/// - [`auto_config_enabled(bool)`](crate::client::fluent_builders::UpdateApplication::auto_config_enabled) / [`set_auto_config_enabled(Option<bool>)`](crate::client::fluent_builders::UpdateApplication::set_auto_config_enabled): (undocumented)
/// - On success, responds with [`UpdateApplicationOutput`](crate::output::UpdateApplicationOutput) with field(s):
/// - [`application_info(Option<ApplicationInfo>)`](crate::output::UpdateApplicationOutput::application_info): <p>Information about the application. </p>
/// - On failure, responds with [`SdkError<UpdateApplicationError>`](crate::error::UpdateApplicationError)
pub fn update_application(&self) -> fluent_builders::UpdateApplication<C, M, R> {
fluent_builders::UpdateApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateComponent`](crate::client::fluent_builders::UpdateComponent) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::UpdateComponent::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::UpdateComponent::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::UpdateComponent::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::UpdateComponent::set_component_name): <p>The name of the component.</p>
/// - [`new_component_name(impl Into<String>)`](crate::client::fluent_builders::UpdateComponent::new_component_name) / [`set_new_component_name(Option<String>)`](crate::client::fluent_builders::UpdateComponent::set_new_component_name): <p>The new name of the component.</p>
/// - [`resource_list(Vec<String>)`](crate::client::fluent_builders::UpdateComponent::resource_list) / [`set_resource_list(Option<Vec<String>>)`](crate::client::fluent_builders::UpdateComponent::set_resource_list): <p>The list of resource ARNs that belong to the component.</p>
/// - On success, responds with [`UpdateComponentOutput`](crate::output::UpdateComponentOutput)
/// - On failure, responds with [`SdkError<UpdateComponentError>`](crate::error::UpdateComponentError)
pub fn update_component(&self) -> fluent_builders::UpdateComponent<C, M, R> {
fluent_builders::UpdateComponent::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateComponentConfiguration`](crate::client::fluent_builders::UpdateComponentConfiguration) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`component_name(impl Into<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::component_name) / [`set_component_name(Option<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_component_name): <p>The name of the component.</p>
/// - [`monitor(bool)`](crate::client::fluent_builders::UpdateComponentConfiguration::monitor) / [`set_monitor(Option<bool>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_monitor): <p>Indicates whether the application component is monitored.</p>
/// - [`tier(Tier)`](crate::client::fluent_builders::UpdateComponentConfiguration::tier) / [`set_tier(Option<Tier>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_tier): <p>The tier of the application component. Supported tiers include <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>DOT_NET_CORE</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
/// - [`component_configuration(impl Into<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::component_configuration) / [`set_component_configuration(Option<String>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_component_configuration): <p>The configuration settings of the component. The value is the escaped JSON of the configuration. For more information about the JSON format, see <a href="https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/working-with-json.html">Working with JSON</a>. You can send a request to <code>DescribeComponentConfigurationRecommendation</code> to see the recommended configuration for a component. For the complete format of the component configuration file, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/component-config.html">Component Configuration</a>.</p>
/// - [`auto_config_enabled(bool)`](crate::client::fluent_builders::UpdateComponentConfiguration::auto_config_enabled) / [`set_auto_config_enabled(Option<bool>)`](crate::client::fluent_builders::UpdateComponentConfiguration::set_auto_config_enabled): (undocumented)
/// - On success, responds with [`UpdateComponentConfigurationOutput`](crate::output::UpdateComponentConfigurationOutput)
/// - On failure, responds with [`SdkError<UpdateComponentConfigurationError>`](crate::error::UpdateComponentConfigurationError)
pub fn update_component_configuration(
&self,
) -> fluent_builders::UpdateComponentConfiguration<C, M, R> {
fluent_builders::UpdateComponentConfiguration::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateLogPattern`](crate::client::fluent_builders::UpdateLogPattern) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_group_name(impl Into<String>)`](crate::client::fluent_builders::UpdateLogPattern::resource_group_name) / [`set_resource_group_name(Option<String>)`](crate::client::fluent_builders::UpdateLogPattern::set_resource_group_name): <p>The name of the resource group.</p>
/// - [`pattern_set_name(impl Into<String>)`](crate::client::fluent_builders::UpdateLogPattern::pattern_set_name) / [`set_pattern_set_name(Option<String>)`](crate::client::fluent_builders::UpdateLogPattern::set_pattern_set_name): <p>The name of the log pattern set.</p>
/// - [`pattern_name(impl Into<String>)`](crate::client::fluent_builders::UpdateLogPattern::pattern_name) / [`set_pattern_name(Option<String>)`](crate::client::fluent_builders::UpdateLogPattern::set_pattern_name): <p>The name of the log pattern.</p>
/// - [`pattern(impl Into<String>)`](crate::client::fluent_builders::UpdateLogPattern::pattern) / [`set_pattern(Option<String>)`](crate::client::fluent_builders::UpdateLogPattern::set_pattern): <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
/// - [`rank(i32)`](crate::client::fluent_builders::UpdateLogPattern::rank) / [`set_rank(i32)`](crate::client::fluent_builders::UpdateLogPattern::set_rank): <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
/// - On success, responds with [`UpdateLogPatternOutput`](crate::output::UpdateLogPatternOutput) with field(s):
/// - [`resource_group_name(Option<String>)`](crate::output::UpdateLogPatternOutput::resource_group_name): <p>The name of the resource group.</p>
/// - [`log_pattern(Option<LogPattern>)`](crate::output::UpdateLogPatternOutput::log_pattern): <p>The successfully created log pattern.</p>
/// - On failure, responds with [`SdkError<UpdateLogPatternError>`](crate::error::UpdateLogPatternError)
pub fn update_log_pattern(&self) -> fluent_builders::UpdateLogPattern<C, M, R> {
fluent_builders::UpdateLogPattern::new(self.handle.clone())
}
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
//! one if its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
/// Fluent builder constructing a request to `CreateApplication`.
///
/// <p>Adds an application that is created from a resource group.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateApplication<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_application_input::Builder,
}
impl<C, M, R> CreateApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::CreateApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateApplicationInputOperationOutputAlias,
crate::output::CreateApplicationOutput,
crate::error::CreateApplicationError,
crate::input::CreateApplicationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
pub fn ops_center_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.ops_center_enabled(input);
self
}
/// <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
pub fn set_ops_center_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_ops_center_enabled(input);
self
}
/// <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
pub fn cwe_monitor_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.cwe_monitor_enabled(input);
self
}
/// <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
pub fn set_cwe_monitor_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_cwe_monitor_enabled(input);
self
}
/// <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem. </p>
pub fn ops_item_sns_topic_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.ops_item_sns_topic_arn(input.into());
self
}
/// <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem. </p>
pub fn set_ops_item_sns_topic_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_ops_item_sns_topic_arn(input);
self
}
/// Appends an item to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>List of tags to add to the application. tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
self.inner = self.inner.tags(input);
self
}
/// <p>List of tags to add to the application. tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn auto_config_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.auto_config_enabled(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_auto_config_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_auto_config_enabled(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn auto_create(mut self, input: bool) -> Self {
self.inner = self.inner.auto_create(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_auto_create(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_auto_create(input);
self
}
}
/// Fluent builder constructing a request to `CreateComponent`.
///
/// <p>Creates a custom component by grouping similar standalone instances to monitor.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateComponent<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_component_input::Builder,
}
impl<C, M, R> CreateComponent<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateComponent`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateComponentOutput,
aws_smithy_http::result::SdkError<crate::error::CreateComponentError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateComponentInputOperationOutputAlias,
crate::output::CreateComponentOutput,
crate::error::CreateComponentError,
crate::input::CreateComponentInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
/// Appends an item to `ResourceList`.
///
/// To override the contents of this collection use [`set_resource_list`](Self::set_resource_list).
///
/// <p>The list of resource ARNs that belong to the component.</p>
pub fn resource_list(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_list(input.into());
self
}
/// <p>The list of resource ARNs that belong to the component.</p>
pub fn set_resource_list(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_resource_list(input);
self
}
}
/// Fluent builder constructing a request to `CreateLogPattern`.
///
/// <p>Adds an log pattern to a <code>LogPatternSet</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateLogPattern<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_log_pattern_input::Builder,
}
impl<C, M, R> CreateLogPattern<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateLogPattern`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateLogPatternOutput,
aws_smithy_http::result::SdkError<crate::error::CreateLogPatternError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateLogPatternInputOperationOutputAlias,
crate::output::CreateLogPatternOutput,
crate::error::CreateLogPatternError,
crate::input::CreateLogPatternInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the log pattern set.</p>
pub fn pattern_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_set_name(input.into());
self
}
/// <p>The name of the log pattern set.</p>
pub fn set_pattern_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_pattern_set_name(input);
self
}
/// <p>The name of the log pattern.</p>
pub fn pattern_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_name(input.into());
self
}
/// <p>The name of the log pattern.</p>
pub fn set_pattern_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern_name(input);
self
}
/// <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
pub fn pattern(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern(input.into());
self
}
/// <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
pub fn set_pattern(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern(input);
self
}
/// <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
pub fn rank(mut self, input: i32) -> Self {
self.inner = self.inner.rank(input);
self
}
/// <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
pub fn set_rank(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_rank(input);
self
}
}
/// Fluent builder constructing a request to `DeleteApplication`.
///
/// <p>Removes the specified application from monitoring. Does not delete the application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DeleteApplication<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_application_input::Builder,
}
impl<C, M, R> DeleteApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteApplicationInputOperationOutputAlias,
crate::output::DeleteApplicationOutput,
crate::error::DeleteApplicationError,
crate::input::DeleteApplicationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
}
/// Fluent builder constructing a request to `DeleteComponent`.
///
/// <p>Ungroups a custom component. When you ungroup custom components, all applicable monitors that are set up for the component are removed and the instances revert to their standalone status.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DeleteComponent<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_component_input::Builder,
}
impl<C, M, R> DeleteComponent<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteComponent`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteComponentOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteComponentError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteComponentInputOperationOutputAlias,
crate::output::DeleteComponentOutput,
crate::error::DeleteComponentError,
crate::input::DeleteComponentInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
}
/// Fluent builder constructing a request to `DeleteLogPattern`.
///
/// <p>Removes the specified log pattern from a <code>LogPatternSet</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DeleteLogPattern<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_log_pattern_input::Builder,
}
impl<C, M, R> DeleteLogPattern<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteLogPattern`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteLogPatternOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteLogPatternError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteLogPatternInputOperationOutputAlias,
crate::output::DeleteLogPatternOutput,
crate::error::DeleteLogPatternError,
crate::input::DeleteLogPatternInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the log pattern set.</p>
pub fn pattern_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_set_name(input.into());
self
}
/// <p>The name of the log pattern set.</p>
pub fn set_pattern_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_pattern_set_name(input);
self
}
/// <p>The name of the log pattern.</p>
pub fn pattern_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_name(input.into());
self
}
/// <p>The name of the log pattern.</p>
pub fn set_pattern_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeApplication`.
///
/// <p>Describes the application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeApplication<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_application_input::Builder,
}
impl<C, M, R> DescribeApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeApplicationInputOperationOutputAlias,
crate::output::DescribeApplicationOutput,
crate::error::DescribeApplicationError,
crate::input::DescribeApplicationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeComponent`.
///
/// <p>Describes a component and lists the resources that are grouped together in a component.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeComponent<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_component_input::Builder,
}
impl<C, M, R> DescribeComponent<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeComponent`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeComponentOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeComponentError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeComponentInputOperationOutputAlias,
crate::output::DescribeComponentOutput,
crate::error::DescribeComponentError,
crate::input::DescribeComponentInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeComponentConfiguration`.
///
/// <p>Describes the monitoring configuration of the component.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeComponentConfiguration<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_component_configuration_input::Builder,
}
impl<C, M, R> DescribeComponentConfiguration<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeComponentConfiguration`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeComponentConfigurationOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeComponentConfigurationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeComponentConfigurationInputOperationOutputAlias,
crate::output::DescribeComponentConfigurationOutput,
crate::error::DescribeComponentConfigurationError,
crate::input::DescribeComponentConfigurationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeComponentConfigurationRecommendation`.
///
/// <p>Describes the recommended monitoring configuration of the component.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeComponentConfigurationRecommendation<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_component_configuration_recommendation_input::Builder,
}
impl<C, M, R> DescribeComponentConfigurationRecommendation<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeComponentConfigurationRecommendation`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeComponentConfigurationRecommendationOutput,
aws_smithy_http::result::SdkError<
crate::error::DescribeComponentConfigurationRecommendationError,
>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeComponentConfigurationRecommendationInputOperationOutputAlias,
crate::output::DescribeComponentConfigurationRecommendationOutput,
crate::error::DescribeComponentConfigurationRecommendationError,
crate::input::DescribeComponentConfigurationRecommendationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
/// <p>The tier of the application component. Supported tiers include <code>DOT_NET_CORE</code>, <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
pub fn tier(mut self, input: crate::model::Tier) -> Self {
self.inner = self.inner.tier(input);
self
}
/// <p>The tier of the application component. Supported tiers include <code>DOT_NET_CORE</code>, <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
pub fn set_tier(mut self, input: std::option::Option<crate::model::Tier>) -> Self {
self.inner = self.inner.set_tier(input);
self
}
}
/// Fluent builder constructing a request to `DescribeLogPattern`.
///
/// <p>Describe a specific log pattern from a <code>LogPatternSet</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeLogPattern<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_log_pattern_input::Builder,
}
impl<C, M, R> DescribeLogPattern<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeLogPattern`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeLogPatternOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeLogPatternError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeLogPatternInputOperationOutputAlias,
crate::output::DescribeLogPatternOutput,
crate::error::DescribeLogPatternError,
crate::input::DescribeLogPatternInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the log pattern set.</p>
pub fn pattern_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_set_name(input.into());
self
}
/// <p>The name of the log pattern set.</p>
pub fn set_pattern_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_pattern_set_name(input);
self
}
/// <p>The name of the log pattern.</p>
pub fn pattern_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_name(input.into());
self
}
/// <p>The name of the log pattern.</p>
pub fn set_pattern_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern_name(input);
self
}
}
/// Fluent builder constructing a request to `DescribeObservation`.
///
/// <p>Describes an anomaly or error with the application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeObservation<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_observation_input::Builder,
}
impl<C, M, R> DescribeObservation<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeObservation`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeObservationOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeObservationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeObservationInputOperationOutputAlias,
crate::output::DescribeObservationOutput,
crate::error::DescribeObservationError,
crate::input::DescribeObservationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID of the observation.</p>
pub fn observation_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.observation_id(input.into());
self
}
/// <p>The ID of the observation.</p>
pub fn set_observation_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_observation_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeProblem`.
///
/// <p>Describes an application problem.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeProblem<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_problem_input::Builder,
}
impl<C, M, R> DescribeProblem<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeProblem`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeProblemOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeProblemError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeProblemInputOperationOutputAlias,
crate::output::DescribeProblemOutput,
crate::error::DescribeProblemError,
crate::input::DescribeProblemInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID of the problem.</p>
pub fn problem_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.problem_id(input.into());
self
}
/// <p>The ID of the problem.</p>
pub fn set_problem_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_problem_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeProblemObservations`.
///
/// <p>Describes the anomalies or errors associated with the problem.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeProblemObservations<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_problem_observations_input::Builder,
}
impl<C, M, R> DescribeProblemObservations<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeProblemObservations`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeProblemObservationsOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeProblemObservationsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeProblemObservationsInputOperationOutputAlias,
crate::output::DescribeProblemObservationsOutput,
crate::error::DescribeProblemObservationsError,
crate::input::DescribeProblemObservationsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID of the problem.</p>
pub fn problem_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.problem_id(input.into());
self
}
/// <p>The ID of the problem.</p>
pub fn set_problem_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_problem_id(input);
self
}
}
/// Fluent builder constructing a request to `ListApplications`.
///
/// <p>Lists the IDs of the applications that you are monitoring. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListApplications<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_applications_input::Builder,
}
impl<C, M, R> ListApplications<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListApplications`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListApplicationsOutput,
aws_smithy_http::result::SdkError<crate::error::ListApplicationsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListApplicationsInputOperationOutputAlias,
crate::output::ListApplicationsOutput,
crate::error::ListApplicationsError,
crate::input::ListApplicationsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListApplicationsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListApplicationsPaginator<C, M, R> {
crate::paginator::ListApplicationsPaginator::new(self.handle, self.inner)
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The token to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The token to request the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListComponents`.
///
/// <p>Lists the auto-grouped, standalone, and custom components of the application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListComponents<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_components_input::Builder,
}
impl<C, M, R> ListComponents<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListComponents`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListComponentsOutput,
aws_smithy_http::result::SdkError<crate::error::ListComponentsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListComponentsInputOperationOutputAlias,
crate::output::ListComponentsOutput,
crate::error::ListComponentsError,
crate::input::ListComponentsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListComponentsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListComponentsPaginator<C, M, R> {
crate::paginator::ListComponentsPaginator::new(self.handle, self.inner)
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The token to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The token to request the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListConfigurationHistory`.
///
/// <p> Lists the INFO, WARN, and ERROR events for periodic configuration updates performed by Application Insights. Examples of events represented are: </p>
/// <ul>
/// <li> <p>INFO: creating a new alarm or updating an alarm threshold.</p> </li>
/// <li> <p>WARN: alarm not created due to insufficient data points used to predict thresholds.</p> </li>
/// <li> <p>ERROR: alarm not created due to permission errors or exceeding quotas. </p> </li>
/// </ul>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListConfigurationHistory<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_configuration_history_input::Builder,
}
impl<C, M, R> ListConfigurationHistory<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListConfigurationHistory`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListConfigurationHistoryOutput,
aws_smithy_http::result::SdkError<crate::error::ListConfigurationHistoryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListConfigurationHistoryInputOperationOutputAlias,
crate::output::ListConfigurationHistoryOutput,
crate::error::ListConfigurationHistoryError,
crate::input::ListConfigurationHistoryInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListConfigurationHistoryPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(
self,
) -> crate::paginator::ListConfigurationHistoryPaginator<C, M, R> {
crate::paginator::ListConfigurationHistoryPaginator::new(self.handle, self.inner)
}
/// <p>Resource group to which the application belongs. </p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>Resource group to which the application belongs. </p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The start time of the event. </p>
pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.inner = self.inner.start_time(input);
self
}
/// <p>The start time of the event. </p>
pub fn set_start_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.inner = self.inner.set_start_time(input);
self
}
/// <p>The end time of the event.</p>
pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.inner = self.inner.end_time(input);
self
}
/// <p>The end time of the event.</p>
pub fn set_end_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.inner = self.inner.set_end_time(input);
self
}
/// <p>The status of the configuration update event. Possible values include INFO, WARN, and ERROR.</p>
pub fn event_status(mut self, input: crate::model::ConfigurationEventStatus) -> Self {
self.inner = self.inner.event_status(input);
self
}
/// <p>The status of the configuration update event. Possible values include INFO, WARN, and ERROR.</p>
pub fn set_event_status(
mut self,
input: std::option::Option<crate::model::ConfigurationEventStatus>,
) -> Self {
self.inner = self.inner.set_event_status(input);
self
}
/// <p> The maximum number of results returned by <code>ListConfigurationHistory</code> in paginated output. When this parameter is used, <code>ListConfigurationHistory</code> returns only <code>MaxResults</code> in a single page along with a <code>NextToken</code> response element. The remaining results of the initial request can be seen by sending another <code>ListConfigurationHistory</code> request with the returned <code>NextToken</code> value. If this parameter is not used, then <code>ListConfigurationHistory</code> returns all results. </p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p> The maximum number of results returned by <code>ListConfigurationHistory</code> in paginated output. When this parameter is used, <code>ListConfigurationHistory</code> returns only <code>MaxResults</code> in a single page along with a <code>NextToken</code> response element. The remaining results of the initial request can be seen by sending another <code>ListConfigurationHistory</code> request with the returned <code>NextToken</code> value. If this parameter is not used, then <code>ListConfigurationHistory</code> returns all results. </p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The <code>NextToken</code> value returned from a previous paginated <code>ListConfigurationHistory</code> request where <code>MaxResults</code> was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the <code>NextToken</code> value. This value is <code>null</code> when there are no more results to return.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The <code>NextToken</code> value returned from a previous paginated <code>ListConfigurationHistory</code> request where <code>MaxResults</code> was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the <code>NextToken</code> value. This value is <code>null</code> when there are no more results to return.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListLogPatterns`.
///
/// <p>Lists the log patterns in the specific log <code>LogPatternSet</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListLogPatterns<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_log_patterns_input::Builder,
}
impl<C, M, R> ListLogPatterns<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListLogPatterns`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLogPatternsOutput,
aws_smithy_http::result::SdkError<crate::error::ListLogPatternsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListLogPatternsInputOperationOutputAlias,
crate::output::ListLogPatternsOutput,
crate::error::ListLogPatternsError,
crate::input::ListLogPatternsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListLogPatternsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListLogPatternsPaginator<C, M, R> {
crate::paginator::ListLogPatternsPaginator::new(self.handle, self.inner)
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the log pattern set.</p>
pub fn pattern_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_set_name(input.into());
self
}
/// <p>The name of the log pattern set.</p>
pub fn set_pattern_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_pattern_set_name(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The token to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The token to request the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListLogPatternSets`.
///
/// <p>Lists the log pattern sets in the specific application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListLogPatternSets<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_log_pattern_sets_input::Builder,
}
impl<C, M, R> ListLogPatternSets<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListLogPatternSets`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLogPatternSetsOutput,
aws_smithy_http::result::SdkError<crate::error::ListLogPatternSetsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListLogPatternSetsInputOperationOutputAlias,
crate::output::ListLogPatternSetsOutput,
crate::error::ListLogPatternSetsError,
crate::input::ListLogPatternSetsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListLogPatternSetsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListLogPatternSetsPaginator<C, M, R> {
crate::paginator::ListLogPatternSetsPaginator::new(self.handle, self.inner)
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The token to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The token to request the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListProblems`.
///
/// <p>Lists the problems with your application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListProblems<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_problems_input::Builder,
}
impl<C, M, R> ListProblems<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListProblems`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListProblemsOutput,
aws_smithy_http::result::SdkError<crate::error::ListProblemsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListProblemsInputOperationOutputAlias,
crate::output::ListProblemsOutput,
crate::error::ListProblemsError,
crate::input::ListProblemsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListProblemsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListProblemsPaginator<C, M, R> {
crate::paginator::ListProblemsPaginator::new(self.handle, self.inner)
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The time when the problem was detected, in epoch seconds. If you don't specify a time frame for the request, problems within the past seven days are returned.</p>
pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.inner = self.inner.start_time(input);
self
}
/// <p>The time when the problem was detected, in epoch seconds. If you don't specify a time frame for the request, problems within the past seven days are returned.</p>
pub fn set_start_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.inner = self.inner.set_start_time(input);
self
}
/// <p>The time when the problem ended, in epoch seconds. If not specified, problems within the past seven days are returned.</p>
pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.inner = self.inner.end_time(input);
self
}
/// <p>The time when the problem ended, in epoch seconds. If not specified, problems within the past seven days are returned.</p>
pub fn set_end_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.inner = self.inner.set_end_time(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p>The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned <code>NextToken</code> value.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The token to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p>The token to request the next page of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
}
/// Fluent builder constructing a request to `ListTagsForResource`.
///
/// <p>Retrieve a list of the tags (keys and values) that are associated with a specified application. A <i>tag</i> is a label that you optionally define and associate with an application. Each tag consists of a required <i>tag key</i> and an optional associated <i>tag value</i>. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListTagsForResource<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_tags_for_resource_input::Builder,
}
impl<C, M, R> ListTagsForResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListTagsForResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsForResourceInputOperationOutputAlias,
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
crate::input::ListTagsForResourceInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to retrieve tag information for.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to retrieve tag information for.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>Add one or more tags (keys and values) to a specified application. A <i>tag</i> is a label that you optionally define and associate with an application. Tags can help you categorize and manage application in different ways, such as by purpose, owner, environment, or other criteria. </p>
/// <p>Each tag consists of a required <i>tag key</i> and an associated <i>tag value</i>, both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct TagResource<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::tag_resource_input::Builder,
}
impl<C, M, R> TagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `TagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::TagResourceInputOperationOutputAlias,
crate::output::TagResourceOutput,
crate::error::TagResourceError,
crate::input::TagResourceInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to add one or more tags to.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to add one or more tags to.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tags that to add to the application. A tag consists of a required tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
self.inner = self.inner.tags(input);
self
}
/// <p>A list of tags that to add to the application. A tag consists of a required tag key (<code>Key</code>) and an associated tag value (<code>Value</code>). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Remove one or more tags (keys and values) from a specified application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UntagResource<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UntagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UntagResourceInputOperationOutputAlias,
crate::output::UntagResourceOutput,
crate::error::UntagResourceError,
crate::input::UntagResourceInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to remove one or more tags from.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the application that you want to remove one or more tags from.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `TagKeys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
/// <p>The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.</p>
/// <p>To remove more than one tag from the application, append the <code>TagKeys</code> parameter and argument for each additional tag to remove, separated by an ampersand. </p>
pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(input.into());
self
}
/// <p>The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.</p>
/// <p>To remove more than one tag from the application, append the <code>TagKeys</code> parameter and argument for each additional tag to remove, separated by an ampersand. </p>
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
/// Fluent builder constructing a request to `UpdateApplication`.
///
/// <p>Updates the application.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateApplication<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_application_input::Builder,
}
impl<C, M, R> UpdateApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateApplicationInputOperationOutputAlias,
crate::output::UpdateApplicationOutput,
crate::error::UpdateApplicationError,
crate::input::UpdateApplicationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
pub fn ops_center_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.ops_center_enabled(input);
self
}
/// <p> When set to <code>true</code>, creates opsItems for any problems detected on an application. </p>
pub fn set_ops_center_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_ops_center_enabled(input);
self
}
/// <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
pub fn cwe_monitor_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.cwe_monitor_enabled(input);
self
}
/// <p> Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as <code>instance terminated</code>, <code>failed deployment</code>, and others. </p>
pub fn set_cwe_monitor_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_cwe_monitor_enabled(input);
self
}
/// <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem.</p>
pub fn ops_item_sns_topic_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.ops_item_sns_topic_arn(input.into());
self
}
/// <p> The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to receive notifications for updates to the opsItem.</p>
pub fn set_ops_item_sns_topic_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_ops_item_sns_topic_arn(input);
self
}
/// <p> Disassociates the SNS topic from the opsItem created for detected problems.</p>
pub fn remove_sns_topic(mut self, input: bool) -> Self {
self.inner = self.inner.remove_sns_topic(input);
self
}
/// <p> Disassociates the SNS topic from the opsItem created for detected problems.</p>
pub fn set_remove_sns_topic(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_remove_sns_topic(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn auto_config_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.auto_config_enabled(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_auto_config_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_auto_config_enabled(input);
self
}
}
/// Fluent builder constructing a request to `UpdateComponent`.
///
/// <p>Updates the custom component name and/or the list of resources that make up the component.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateComponent<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_component_input::Builder,
}
impl<C, M, R> UpdateComponent<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateComponent`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateComponentOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateComponentError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateComponentInputOperationOutputAlias,
crate::output::UpdateComponentOutput,
crate::error::UpdateComponentError,
crate::input::UpdateComponentInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
/// <p>The new name of the component.</p>
pub fn new_component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.new_component_name(input.into());
self
}
/// <p>The new name of the component.</p>
pub fn set_new_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_new_component_name(input);
self
}
/// Appends an item to `ResourceList`.
///
/// To override the contents of this collection use [`set_resource_list`](Self::set_resource_list).
///
/// <p>The list of resource ARNs that belong to the component.</p>
pub fn resource_list(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_list(input.into());
self
}
/// <p>The list of resource ARNs that belong to the component.</p>
pub fn set_resource_list(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_resource_list(input);
self
}
}
/// Fluent builder constructing a request to `UpdateComponentConfiguration`.
///
/// <p>Updates the monitoring configurations for the component. The configuration input parameter is an escaped JSON of the configuration and should match the schema of what is returned by <code>DescribeComponentConfigurationRecommendation</code>. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateComponentConfiguration<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_component_configuration_input::Builder,
}
impl<C, M, R> UpdateComponentConfiguration<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateComponentConfiguration`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateComponentConfigurationOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateComponentConfigurationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateComponentConfigurationInputOperationOutputAlias,
crate::output::UpdateComponentConfigurationOutput,
crate::error::UpdateComponentConfigurationError,
crate::input::UpdateComponentConfigurationInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the component.</p>
pub fn component_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_name(input.into());
self
}
/// <p>The name of the component.</p>
pub fn set_component_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_name(input);
self
}
/// <p>Indicates whether the application component is monitored.</p>
pub fn monitor(mut self, input: bool) -> Self {
self.inner = self.inner.monitor(input);
self
}
/// <p>Indicates whether the application component is monitored.</p>
pub fn set_monitor(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_monitor(input);
self
}
/// <p>The tier of the application component. Supported tiers include <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>DOT_NET_CORE</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
pub fn tier(mut self, input: crate::model::Tier) -> Self {
self.inner = self.inner.tier(input);
self
}
/// <p>The tier of the application component. Supported tiers include <code>DOT_NET_WORKER</code>, <code>DOT_NET_WEB</code>, <code>DOT_NET_CORE</code>, <code>SQL_SERVER</code>, and <code>DEFAULT</code>.</p>
pub fn set_tier(mut self, input: std::option::Option<crate::model::Tier>) -> Self {
self.inner = self.inner.set_tier(input);
self
}
/// <p>The configuration settings of the component. The value is the escaped JSON of the configuration. For more information about the JSON format, see <a href="https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/working-with-json.html">Working with JSON</a>. You can send a request to <code>DescribeComponentConfigurationRecommendation</code> to see the recommended configuration for a component. For the complete format of the component configuration file, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/component-config.html">Component Configuration</a>.</p>
pub fn component_configuration(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.component_configuration(input.into());
self
}
/// <p>The configuration settings of the component. The value is the escaped JSON of the configuration. For more information about the JSON format, see <a href="https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/working-with-json.html">Working with JSON</a>. You can send a request to <code>DescribeComponentConfigurationRecommendation</code> to see the recommended configuration for a component. For the complete format of the component configuration file, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/component-config.html">Component Configuration</a>.</p>
pub fn set_component_configuration(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_component_configuration(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn auto_config_enabled(mut self, input: bool) -> Self {
self.inner = self.inner.auto_config_enabled(input);
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_auto_config_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_auto_config_enabled(input);
self
}
}
/// Fluent builder constructing a request to `UpdateLogPattern`.
///
/// <p>Adds a log pattern to a <code>LogPatternSet</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateLogPattern<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_log_pattern_input::Builder,
}
impl<C, M, R> UpdateLogPattern<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateLogPattern`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateLogPatternOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateLogPatternError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateLogPatternInputOperationOutputAlias,
crate::output::UpdateLogPatternOutput,
crate::error::UpdateLogPatternError,
crate::input::UpdateLogPatternInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the resource group.</p>
pub fn resource_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_group_name(input.into());
self
}
/// <p>The name of the resource group.</p>
pub fn set_resource_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_resource_group_name(input);
self
}
/// <p>The name of the log pattern set.</p>
pub fn pattern_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_set_name(input.into());
self
}
/// <p>The name of the log pattern set.</p>
pub fn set_pattern_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_pattern_set_name(input);
self
}
/// <p>The name of the log pattern.</p>
pub fn pattern_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern_name(input.into());
self
}
/// <p>The name of the log pattern.</p>
pub fn set_pattern_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern_name(input);
self
}
/// <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
pub fn pattern(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.pattern(input.into());
self
}
/// <p>The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.</p>
pub fn set_pattern(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_pattern(input);
self
}
/// <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
pub fn rank(mut self, input: i32) -> Self {
self.inner = self.inner.rank(input);
self
}
/// <p>Rank of the log pattern. Must be a value between <code>1</code> and <code>1,000,000</code>. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank <code>1</code> will be the first to get matched to a log line. A pattern of rank <code>1,000,000</code> will be last to get matched. When you configure custom log patterns from the console, a <code>Low</code> severity pattern translates to a <code>750,000</code> rank. A <code>Medium</code> severity pattern translates to a <code>500,000</code> rank. And a <code>High</code> severity pattern translates to a <code>250,000</code> rank. Rank values less than <code>1</code> or greater than <code>1,000,000</code> are reserved for AWS-provided patterns. </p>
pub fn set_rank(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_rank(input);
self
}
}
}
impl<C> Client<C, crate::middleware::DefaultMiddleware, aws_smithy_client::retry::Standard> {
/// Creates a client with the given service config and connector override.
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::new()
.connector(conn)
.middleware(crate::middleware::DefaultMiddleware::new());
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl
Client<
aws_smithy_client::erase::DynConnector,
crate::middleware::DefaultMiddleware,
aws_smithy_client::retry::Standard,
>
{
/// Creates a new client from a shared config.
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(config: &aws_types::config::Config) -> Self {
Self::from_conf(config.into())
}
/// Creates a new client from the service [`Config`](crate::Config).
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::dyn_https()
.middleware(crate::middleware::DefaultMiddleware::new());
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
// the builder maintains a try-state. To avoid suppressing the warning when sleep is unset,
// only set it if we actually have a sleep impl.
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
| 57.509872 | 946 | 0.637254 |
162b16ab78843c7de3378a29dd95f2b1d6c2a6de | 14,452 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::vm_validator::{TransactionValidation, VMValidator};
use config_builder::util::get_test_config;
use executor::Executor;
use futures::future::Future;
use grpc_helpers::ServerHandle;
use grpcio::EnvBuilder;
use libra_config::config::NodeConfig;
use libra_crypto::ed25519::*;
use libra_types::{
account_address, account_config,
test_helpers::transaction_test_helpers,
transaction::{Module, Script, TransactionArgument, MAX_TRANSACTION_SIZE_IN_BYTES},
vm_error::StatusCode,
};
use rand::SeedableRng;
use std::{sync::Arc, u64};
use storage_client::{StorageRead, StorageReadServiceClient, StorageWriteServiceClient};
use storage_service::start_storage_service;
use transaction_builder::encode_transfer_script;
use vm_runtime::MoveVM;
struct TestValidator {
_storage: ServerHandle,
vm_validator: VMValidator,
}
impl TestValidator {
fn new(config: &NodeConfig) -> Self {
let storage = start_storage_service(&config);
// setup execution
let client_env = Arc::new(EnvBuilder::new().build());
let storage_read_client: Arc<dyn StorageRead> = Arc::new(StorageReadServiceClient::new(
Arc::clone(&client_env),
&config.storage.address,
config.storage.port,
));
let storage_write_client = Arc::new(StorageWriteServiceClient::new(
Arc::clone(&client_env),
&config.storage.address,
config.storage.port,
None,
));
// Create executor to initialize genesis state. Otherwise gprc will report error when
// fetching data from storage.
let _executor = Executor::<MoveVM>::new(
Arc::clone(&storage_read_client) as Arc<dyn StorageRead>,
storage_write_client,
config,
);
let vm_validator = VMValidator::new(config, storage_read_client);
TestValidator {
_storage: storage,
vm_validator,
}
}
}
impl std::ops::Deref for TestValidator {
type Target = VMValidator;
fn deref(&self) -> &Self::Target {
&self.vm_validator
}
}
// These tests are meant to test all high-level code paths that lead to a validation error in the
// verification of a transaction in the VM. However, there are a couple notable exceptions that we
// do _not_ test here -- this is due to limitations around execution and semantics. The following
// errors are not exercised:
// * Sequence number too old -- We can't test sequence number too old here without running execution
// first in order to bump the account's sequence number. This needs to (and is) tested in the
// language e2e tests in: libra/language/e2e-tests/src/tests/verify_txn.rs ->
// verify_simple_payment.
// * Errors arising from deserializing the code -- these are tested in
// - libra/language/vm/src/unit_tests/deserializer_tests.rs
// - libra/language/vm/tests/serializer_tests.rs
// * Errors arising from calls to `static_verify_program` -- this is tested separately in tests for
// the bytecode verifier.
// * Testing for invalid genesis write sets -- this is tested in
// libra/language/e2e-tests/src/tests/genesis.rs
#[test]
fn test_validate_transaction() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_signed_txn(
address,
1,
keypair.private_key,
keypair.public_key,
Some(program),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret, None);
}
#[test]
fn test_validate_invalid_signature() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let mut rng = ::rand::rngs::StdRng::from_seed([1u8; 32]);
let (other_private_key, _) = compat::generate_keypair(&mut rng);
// Submit with an account using an different private/public keypair
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_unchecked_txn(
address,
1,
other_private_key,
keypair.public_key,
Some(program),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::INVALID_SIGNATURE);
}
#[test]
fn test_validate_known_script_too_large_args() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let txn = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
Some(Script::new(vec![42; MAX_TRANSACTION_SIZE_IN_BYTES], vec![])), /* generate a
* program with args
* longer than the
* max size */
0,
0, /* max gas price */
None,
);
let ret = vm_validator.validate_transaction(txn).wait().unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::EXCEEDED_MAX_TRANSACTION_SIZE
);
}
#[test]
fn test_validate_max_gas_units_above_max() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let txn = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
None,
0,
0, /* max gas price */
Some(u64::MAX), // Max gas units
);
let ret = vm_validator.validate_transaction(txn).wait().unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::MAX_GAS_UNITS_EXCEEDS_MAX_GAS_UNITS_BOUND
);
}
#[test]
fn test_validate_max_gas_units_below_min() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let txn = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
None,
0,
0, /* max gas price */
Some(1), // Max gas units
);
let ret = vm_validator.validate_transaction(txn).wait().unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS
);
}
#[test]
fn test_validate_max_gas_price_above_bounds() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let txn = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
None,
0,
u64::MAX, /* max gas price */
None,
);
let ret = vm_validator.validate_transaction(txn).wait().unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::GAS_UNIT_PRICE_ABOVE_MAX_BOUND
);
}
// NB: This test is designed to fail if/when we bump the minimum gas price to be non-zero. You will
// then need to update this price here in order to make the test pass -- uncomment the commented
// out assertion and remove the current failing assertion in this case.
#[test]
fn test_validate_max_gas_price_below_bounds() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let txn = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
Some(program),
0,
0, /* max gas price */
None,
);
let ret = vm_validator.validate_transaction(txn).wait().unwrap();
assert_eq!(ret, None);
//assert_eq!(
// ret.unwrap().major_status,
// StatusCode::GAS_UNIT_PRICE_BELOW_MIN_BOUND
//);
}
#[cfg(not(feature = "allow_custom_transaction_scripts"))]
#[test]
fn test_validate_unknown_script() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let transaction = transaction_test_helpers::get_test_signed_txn(
address,
1,
keypair.private_key,
keypair.public_key,
None,
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::UNKNOWN_SCRIPT);
}
// Make sure that we can't publish non-whitelisted modules
#[cfg(not(feature = "allow_custom_transaction_scripts"))]
#[cfg(not(feature = "custom_modules"))]
#[test]
fn test_validate_module_publishing() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let transaction = transaction_test_helpers::get_test_signed_module_publishing_transaction(
address,
1,
keypair.private_key,
keypair.public_key,
Module::new(vec![]),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::UNKNOWN_MODULE);
}
#[test]
fn test_validate_invalid_auth_key() {
let (config, _) = get_test_config();
let vm_validator = TestValidator::new(&config);
let mut rng = ::rand::rngs::StdRng::from_seed([1u8; 32]);
let (other_private_key, other_public_key) = compat::generate_keypair(&mut rng);
// Submit with an account using an different private/public keypair
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_signed_txn(
address,
1,
other_private_key,
other_public_key,
Some(program),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::INVALID_AUTH_KEY);
}
#[test]
fn test_validate_balance_below_gas_fee() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_signed_transaction(
address,
1,
keypair.private_key.clone(),
keypair.public_key,
Some(program),
0,
// Note that this will be dependent upon the max gas price and gas amounts that are set. So
// changing those may cause this test to fail.
10_000, /* max gas price */
Some(1_000_000),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::INSUFFICIENT_BALANCE_FOR_TRANSACTION_FEE
);
}
#[test]
fn test_validate_account_doesnt_exist() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let random_account_addr = account_address::AccountAddress::random();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_signed_transaction(
random_account_addr,
1,
keypair.private_key,
keypair.public_key,
Some(program),
0,
1, /* max gas price */
None,
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(
ret.unwrap().major_status,
StatusCode::SENDING_ACCOUNT_DOES_NOT_EXIST
);
}
#[test]
fn test_validate_sequence_number_too_new() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let program = encode_transfer_script(&address, 100);
let transaction = transaction_test_helpers::get_test_signed_txn(
address,
1,
keypair.private_key,
keypair.public_key,
Some(program),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret, None);
}
#[test]
fn test_validate_invalid_arguments() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let (program_script, _) = encode_transfer_script(&address, 100).into_inner();
let program = Script::new(program_script, vec![TransactionArgument::U64(42)]);
let transaction = transaction_test_helpers::get_test_signed_txn(
address,
1,
keypair.private_key,
keypair.public_key,
Some(program),
);
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::TYPE_MISMATCH);
}
#[test]
fn test_validate_non_genesis_write_set() {
let (config, keypair) = get_test_config();
let vm_validator = TestValidator::new(&config);
let address = account_config::association_address();
let transaction = transaction_test_helpers::get_write_set_txn(
address,
1,
keypair.private_key,
keypair.public_key,
None,
)
.into_inner();
let ret = vm_validator
.validate_transaction(transaction)
.wait()
.unwrap();
assert_eq!(ret.unwrap().major_status, StatusCode::REJECTED_WRITE_SET);
}
| 32.331096 | 100 | 0.656103 |
5d933552fa5e615dfed9e56616cfb468a299ba37 | 3,997 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::STS {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct XFACTORR {
bits: u8,
}
impl XFACTORR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct RESET_RCVR {
bits: bool,
}
impl RESET_RCVR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _XFACTORW<'a> {
w: &'a mut W,
}
impl<'a> _XFACTORW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _RESET_RCVW<'a> {
w: &'a mut W,
}
impl<'a> _RESET_RCVW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 4:7"]
#[inline]
pub fn xfactor(&self) -> XFACTORR {
let bits = {
const MASK: u8 = 15;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) as u8
};
XFACTORR { bits }
}
#[doc = "Bit 3 - Reset Uart Receiver"]
#[inline]
pub fn reset_rcv(&self) -> RESET_RCVR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
RESET_RCVR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 4:7"]
#[inline]
pub fn xfactor(&mut self) -> _XFACTORW {
_XFACTORW { w: self }
}
#[doc = "Bit 3 - Reset Uart Receiver"]
#[inline]
pub fn reset_rcv(&mut self) -> _RESET_RCVW {
_RESET_RCVW { w: self }
}
}
| 24.224242 | 59 | 0.495872 |
6905db734719e4632f1b038743361637169f294c | 19,667 | /*
* Copyright (C) 2015 Benjamin Fry <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::marker::PhantomData;
use crate::error::{ProtoErrorKind, ProtoResult};
use super::BinEncodable;
use crate::op::Header;
// this is private to make sure there is no accidental access to the inner buffer.
mod private {
use crate::error::{ProtoErrorKind, ProtoResult};
/// A wrapper for a buffer that guarantees writes never exceed a defined set of bytes
pub struct MaximalBuf<'a> {
max_size: usize,
buffer: &'a mut Vec<u8>,
}
impl<'a> MaximalBuf<'a> {
pub fn new(max_size: u16, buffer: &'a mut Vec<u8>) -> Self {
MaximalBuf {
max_size: max_size as usize,
buffer,
}
}
/// Sets the maximum size to enforce
pub fn set_max_size(&mut self, max: u16) {
self.max_size = max as usize;
}
/// returns an error if the maximum buffer size would be exceeded with the addition number of elements
///
/// and reserves the additional space in the buffer
pub fn enforced_write<F>(&mut self, additional: usize, writer: F) -> ProtoResult<()>
where
F: FnOnce(&mut Vec<u8>) -> (),
{
let expected_len = self.buffer.len() + additional;
if expected_len > self.max_size {
Err(ProtoErrorKind::MaxBufferSizeExceeded(self.max_size).into())
} else {
self.buffer.reserve(additional);
writer(self.buffer);
debug_assert_eq!(self.buffer.len(), expected_len);
Ok(())
}
}
/// truncates are always safe
pub fn truncate(&mut self, len: usize) {
self.buffer.truncate(len)
}
/// returns the length of the underlying buffer
pub fn len(&self) -> usize {
self.buffer.len()
}
/// Immutable reads are always safe
pub fn buffer(&'a self) -> &'a [u8] {
self.buffer as &'a [u8]
}
/// Returns a reference to the internal buffer
pub fn into_bytes(self) -> &'a Vec<u8> {
self.buffer
}
}
}
/// Encode DNS messages and resource record types.
pub struct BinEncoder<'a> {
offset: usize,
buffer: private::MaximalBuf<'a>,
/// start and end of label pointers, smallvec here?
name_pointers: Vec<(usize, usize)>,
mode: EncodeMode,
canonical_names: bool,
}
impl<'a> BinEncoder<'a> {
/// Create a new encoder with the Vec to fill
pub fn new(buf: &'a mut Vec<u8>) -> Self {
Self::with_offset(buf, 0, EncodeMode::Normal)
}
/// Specify the mode for encoding
///
/// # Arguments
///
/// * `mode` - In Signing mode, canonical forms of all data are encoded, otherwise format matches the source form
pub fn with_mode(buf: &'a mut Vec<u8>, mode: EncodeMode) -> Self {
Self::with_offset(buf, 0, mode)
}
/// Begins the encoder at the given offset
///
/// This is used for pointers. If this encoder is starting at some point further in
/// the sequence of bytes, for the proper offset of the pointer, the offset accounts for that
/// by using the offset to add to the pointer location being written.
///
/// # Arguments
///
/// * `offset` - index at which to start writing into the buffer
pub fn with_offset(buf: &'a mut Vec<u8>, offset: u32, mode: EncodeMode) -> Self {
if buf.capacity() < 512 {
let reserve = 512 - buf.capacity();
buf.reserve(reserve);
}
BinEncoder {
offset: offset as usize,
// FIXME: add max_size to signature
buffer: private::MaximalBuf::new(u16::max_value(), buf),
name_pointers: Vec::new(),
mode,
canonical_names: false,
}
}
// TODO: move to constructor (kept for backward compatibility)
/// Sets the maximum size of the buffer
///
/// DNS message lens must be smaller than u16::max_value due to hard limits in the protocol
///
/// *this method will move to the constructor in a future release*
pub fn set_max_size(&mut self, max: u16) {
self.buffer.set_max_size(max);
}
/// Returns a reference to the internal buffer
pub fn into_bytes(self) -> &'a Vec<u8> {
self.buffer.into_bytes()
}
/// Returns the length of the buffer
pub fn len(&self) -> usize {
self.buffer.len()
}
/// Returns `true` if the buffer is empty
pub fn is_empty(&self) -> bool {
self.buffer.buffer().is_empty()
}
/// Returns the current offset into the buffer
pub fn offset(&self) -> usize {
self.offset
}
/// sets the current offset to the new offset
pub fn set_offset(&mut self, offset: usize) {
self.offset = offset;
}
/// Returns the current Encoding mode
pub fn mode(&self) -> EncodeMode {
self.mode
}
/// If set to true, then names will be written into the buffer in canonical form
pub fn set_canonical_names(&mut self, canonical_names: bool) {
self.canonical_names = canonical_names;
}
/// Returns true if then encoder is writing in canonical form
pub fn is_canonical_names(&self) -> bool {
self.canonical_names
}
/// Emit all names in canonical form, useful for https://tools.ietf.org/html/rfc3597
pub fn with_canonical_names<F: FnOnce(&mut Self) -> ProtoResult<()>>(
&mut self,
f: F,
) -> ProtoResult<()> {
let was_canonical = self.is_canonical_names();
self.set_canonical_names(true);
let res = f(self);
self.set_canonical_names(was_canonical);
res
}
// TODO: deprecate this...
/// Reserve specified additional length in the internal buffer.
pub fn reserve(&mut self, _additional: usize) -> ProtoResult<()> {
Ok(())
}
/// trims to the current offset
pub fn trim(&mut self) {
let offset = self.offset;
self.buffer.truncate(offset);
self.name_pointers
.retain(|&(start, end)| start < offset && end <= offset);
}
// /// returns an error if the maximum buffer size would be exceeded with the addition number of elements
// ///
// /// and reserves the additional space in the buffer
// fn enforce_size(&mut self, additional: usize) -> ProtoResult<()> {
// if (self.buffer.len() + additional) > self.max_size {
// Err(ProtoErrorKind::MaxBufferSizeExceeded(self.max_size).into())
// } else {
// self.reserve(additional);
// Ok(())
// }
// }
/// borrow a slice from the encoder
pub fn slice_of(&self, start: usize, end: usize) -> &[u8] {
assert!(start < self.offset);
assert!(end <= self.buffer.len());
&self.buffer.buffer()[start..end]
}
/// Stores a label pointer to an already written label
///
/// The location is the current position in the buffer
/// implicitly, it is expected that the name will be written to the stream after the current index.
pub fn store_label_pointer(&mut self, start: usize, end: usize) {
assert!(start <= (u16::max_value() as usize));
assert!(end <= (u16::max_value() as usize));
assert!(start <= end);
if self.offset < 0x3FFF_usize {
self.name_pointers.push((start, end)); // the next char will be at the len() location
}
}
/// Looks up the index of an already written label
pub fn get_label_pointer(&self, start: usize, end: usize) -> Option<u16> {
let search = self.slice_of(start, end);
for &(match_start, match_end) in &self.name_pointers {
let matcher = self.slice_of(match_start as usize, match_end as usize);
if matcher == search {
assert!(match_start <= (u16::max_value() as usize));
return Some(match_start as u16);
}
}
None
}
/// Emit one byte into the buffer
pub fn emit(&mut self, b: u8) -> ProtoResult<()> {
if self.offset < self.buffer.len() {
let offset = self.offset;
self.buffer.enforced_write(0, |buffer| {
*buffer
.get_mut(offset)
.expect("could not get index at offset") = b
})?;
} else {
self.buffer.enforced_write(1, |buffer| buffer.push(b))?;
}
self.offset += 1;
Ok(())
}
/// matches description from above.
///
/// ```
/// use trust_dns_proto::serialize::binary::BinEncoder;
///
/// let mut bytes: Vec<u8> = Vec::new();
/// {
/// let mut encoder: BinEncoder = BinEncoder::new(&mut bytes);
/// encoder.emit_character_data("abc");
/// }
/// assert_eq!(bytes, vec![3,b'a',b'b',b'c']);
/// ```
pub fn emit_character_data<S: AsRef<[u8]>>(&mut self, char_data: S) -> ProtoResult<()> {
let char_bytes = char_data.as_ref();
if char_bytes.len() > 255 {
return Err(ProtoErrorKind::CharacterDataTooLong {
max: 255,
len: char_bytes.len(),
}
.into());
}
// first the length is written
self.emit(char_bytes.len() as u8)?;
self.write_slice(char_bytes)
}
/// Emit one byte into the buffer
pub fn emit_u8(&mut self, data: u8) -> ProtoResult<()> {
self.emit(data)
}
/// Writes a u16 in network byte order to the buffer
pub fn emit_u16(&mut self, data: u16) -> ProtoResult<()> {
self.write_slice(&data.to_be_bytes())
}
/// Writes an i32 in network byte order to the buffer
pub fn emit_i32(&mut self, data: i32) -> ProtoResult<()> {
self.write_slice(&data.to_be_bytes())
}
/// Writes an u32 in network byte order to the buffer
pub fn emit_u32(&mut self, data: u32) -> ProtoResult<()> {
self.write_slice(&data.to_be_bytes())
}
fn write_slice(&mut self, data: &[u8]) -> ProtoResult<()> {
// replacement case, the necessary space should have been reserved already...
if self.offset < self.buffer.len() {
let offset = self.offset;
self.buffer.enforced_write(0, |buffer| {
let mut offset = offset;
for b in data {
*buffer
.get_mut(offset)
.expect("could not get index at offset for slice") = *b;
offset += 1;
}
})?;
self.offset += data.len();
} else {
self.buffer
.enforced_write(data.len(), |buffer| buffer.extend_from_slice(data))?;
self.offset += data.len();
}
Ok(())
}
/// Writes the byte slice to the stream
pub fn emit_vec(&mut self, data: &[u8]) -> ProtoResult<()> {
self.write_slice(data)
}
/// Emits all the elements of an Iterator to the encoder
pub fn emit_all<'e, I: Iterator<Item = &'e E>, E: 'e + BinEncodable>(
&mut self,
mut iter: I,
) -> ProtoResult<usize> {
self.emit_iter(&mut iter)
}
// TODO: dedup with above emit_all
/// Emits all the elements of an Iterator to the encoder
pub fn emit_all_refs<'r, 'e, I, E>(&mut self, iter: I) -> ProtoResult<usize>
where
'e: 'r,
I: Iterator<Item = &'r &'e E>,
E: 'r + 'e + BinEncodable,
{
let mut iter = iter.cloned();
self.emit_iter(&mut iter)
}
/// emits all items in the iterator, return the number emitted
#[allow(clippy::needless_return)]
pub fn emit_iter<'e, I: Iterator<Item = &'e E>, E: 'e + BinEncodable>(
&mut self,
iter: &mut I,
) -> ProtoResult<usize> {
let mut count = 0;
for i in iter {
let rollback = self.set_rollback();
i.emit(self).map_err(|e| {
if let ProtoErrorKind::MaxBufferSizeExceeded(_) = e.kind() {
rollback.rollback(self);
return ProtoErrorKind::NotAllRecordsWritten { count }.into();
} else {
return e;
}
})?;
count += 1;
}
Ok(count)
}
/// capture a location to write back to
pub fn place<T: EncodedSize>(&mut self) -> ProtoResult<Place<T>> {
let index = self.offset;
let len = T::size_of();
// resize the buffer
self.buffer
.enforced_write(len, |buffer| buffer.resize(index + len, 0))?;
// update the offset
self.offset += len;
Ok(Place {
start_index: index,
phantom: PhantomData,
})
}
/// calculates the length of data written since the place was creating
pub fn len_since_place<T: EncodedSize>(&self, place: &Place<T>) -> usize {
(self.offset - place.start_index) - place.size_of()
}
/// write back to a previously captured location
pub fn emit_at<T: EncodedSize>(&mut self, place: Place<T>, data: T) -> ProtoResult<()> {
// preserve current index
let current_index = self.offset;
// reset the current index back to place before writing
// this is an assert because it's programming error for it to be wrong.
assert!(place.start_index < current_index);
self.offset = place.start_index;
// emit the data to be written at this place
let emit_result = data.emit(self);
// double check that the current number of bytes were written
// this is an assert because it's programming error for it to be wrong.
assert!((self.offset - place.start_index) == place.size_of());
// reset to original location
self.offset = current_index;
emit_result
}
fn set_rollback(&self) -> Rollback {
Rollback {
rollback_index: self.offset(),
}
}
}
/// A trait to return the size of a type as it will be encoded in DNS
///
/// it does not necessarily equal `std::mem::size_of`, though it might, especially for primitives
pub trait EncodedSize: BinEncodable {
/// Return the size in bytes of the
fn size_of() -> usize;
}
impl EncodedSize for u16 {
fn size_of() -> usize {
2
}
}
impl EncodedSize for Header {
fn size_of() -> usize {
Header::len()
}
}
#[derive(Debug)]
#[must_use = "data must be written back to the place"]
pub struct Place<T: EncodedSize> {
start_index: usize,
phantom: PhantomData<T>,
}
impl<T: EncodedSize> Place<T> {
pub fn replace(self, encoder: &mut BinEncoder, data: T) -> ProtoResult<()> {
encoder.emit_at(self, data)
}
pub fn size_of(&self) -> usize {
T::size_of()
}
}
/// A type representing a rollback point in a stream
pub struct Rollback {
rollback_index: usize,
}
impl Rollback {
pub fn rollback(self, encoder: &mut BinEncoder) {
encoder.set_offset(self.rollback_index)
}
}
/// In the Verify mode there maybe some things which are encoded differently, e.g. SIG0 records
/// should not be included in the additional count and not in the encoded data when in Verify
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum EncodeMode {
/// In signing mode records are written in canonical form
Signing,
/// Write records in standard format
Normal,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::op::Message;
use crate::serialize::binary::BinDecoder;
#[test]
fn test_label_compression_regression() {
// https://github.com/bluejekyll/trust-dns/issues/339
/*
;; QUESTION SECTION:
;bluedot.is.autonavi.com.gds.alibabadns.com. IN AAAA
;; AUTHORITY SECTION:
gds.alibabadns.com. 1799 IN SOA gdsns1.alibabadns.com. none. 2015080610 1800 600 3600 360
*/
let data: Vec<u8> = vec![
154, 50, 129, 128, 0, 1, 0, 0, 0, 1, 0, 1, 7, 98, 108, 117, 101, 100, 111, 116, 2, 105,
115, 8, 97, 117, 116, 111, 110, 97, 118, 105, 3, 99, 111, 109, 3, 103, 100, 115, 10,
97, 108, 105, 98, 97, 98, 97, 100, 110, 115, 3, 99, 111, 109, 0, 0, 28, 0, 1, 192, 36,
0, 6, 0, 1, 0, 0, 7, 7, 0, 35, 6, 103, 100, 115, 110, 115, 49, 192, 40, 4, 110, 111,
110, 101, 0, 120, 27, 176, 162, 0, 0, 7, 8, 0, 0, 2, 88, 0, 0, 14, 16, 0, 0, 1, 104, 0,
0, 41, 2, 0, 0, 0, 0, 0, 0, 0,
];
let msg = Message::from_vec(&data).unwrap();
msg.to_bytes().unwrap();
}
#[test]
fn test_size_of() {
assert_eq!(u16::size_of(), 2);
}
#[test]
fn test_place() {
let mut buf = vec![];
{
let mut encoder = BinEncoder::new(&mut buf);
let place = encoder.place::<u16>().unwrap();
assert_eq!(place.size_of(), 2);
assert_eq!(encoder.len_since_place(&place), 0);
encoder.emit(42_u8).expect("failed 0");
assert_eq!(encoder.len_since_place(&place), 1);
encoder.emit(48_u8).expect("failed 1");
assert_eq!(encoder.len_since_place(&place), 2);
place
.replace(&mut encoder, 4_u16)
.expect("failed to replace");
drop(encoder);
}
assert_eq!(buf.len(), 4);
let mut decoder = BinDecoder::new(&buf);
let written = decoder.read_u16().expect("cound not read u16").unverified();
assert_eq!(written, 4);
}
#[test]
fn test_max_size() {
let mut buf = vec![];
let mut encoder = BinEncoder::new(&mut buf);
encoder.set_max_size(5);
encoder.emit(0).expect("failed to write");
encoder.emit(1).expect("failed to write");
encoder.emit(2).expect("failed to write");
encoder.emit(3).expect("failed to write");
encoder.emit(4).expect("failed to write");
let error = encoder.emit(5).unwrap_err();
match *error.kind() {
ProtoErrorKind::MaxBufferSizeExceeded(_) => (),
_ => panic!(),
}
}
#[test]
fn test_max_size_0() {
let mut buf = vec![];
let mut encoder = BinEncoder::new(&mut buf);
encoder.set_max_size(0);
let error = encoder.emit(0).unwrap_err();
match *error.kind() {
ProtoErrorKind::MaxBufferSizeExceeded(_) => (),
_ => panic!(),
}
}
#[test]
fn test_max_size_place() {
let mut buf = vec![];
let mut encoder = BinEncoder::new(&mut buf);
encoder.set_max_size(2);
let place = encoder.place::<u16>().expect("place failed");
place.replace(&mut encoder, 16).expect("placeback failed");
let error = encoder.place::<u16>().unwrap_err();
match *error.kind() {
ProtoErrorKind::MaxBufferSizeExceeded(_) => (),
_ => panic!(),
}
}
}
| 31.618971 | 117 | 0.567499 |
87256cedeca647f71379fe1908bb488afcbd2e7f | 1,778 | use super::InputEvent;
use crate::{Comp, Real, SystemMessage};
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
pub enum MouseButton {
Left,
Right,
Middle,
Other(u16),
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct MouseDown {
pub pos: MousePos,
pub button: MouseButton,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct MouseScroll {
pub pos: MousePos,
pub delta: (f32, f32),
}
#[derive(Default, Debug, Clone, Copy, PartialEq)]
pub struct MouseController {
last_pos: Option<MousePos>,
last_offset: Option<MousePos>,
}
#[derive(Default, Debug, Clone, Copy, PartialEq)]
pub struct MousePos {
pub x: Real,
pub y: Real,
}
impl MouseController {
pub fn new() -> Self {
MouseController {
last_pos: None,
last_offset: None,
}
}
pub fn update_pos(&mut self, x: Real, y: Real) {
let offset = self
.last_pos
.map(|last| MousePos {
x: x - last.x,
y: last.y - y, // reversed since y-coordinates go from bottom to top
})
.unwrap_or_default();
self.last_pos = Some(MousePos { x, y });
self.last_offset = Some(offset);
}
pub fn last_pos(&self) -> MousePos {
self.last_pos.unwrap_or_default()
}
pub fn pressed_comp(&self, comp: &mut Comp, button: MouseButton) {
let pos = self.last_pos();
comp.send_system_msg(SystemMessage::Input(InputEvent::mouse_down(pos, button)))
}
pub fn mouse_scroll(&self, comp: &mut Comp, delta: (f32, f32)) {
let pos = self.last_pos();
comp.send_system_msg(SystemMessage::Input(InputEvent::mouse_scroll(MouseScroll {
pos,
delta,
})))
}
}
| 24.027027 | 88 | 0.589426 |
4a66a79d7555dfe91b638443c896d9677d7f4b71 | 38,527 | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
crypto::{Hash, PublicKey},
helpers::{Height, ValidateInput},
runtime::{
migrations::MigrationType, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceId, InstanceSpec, InstanceState, InstanceStatus, RuntimeFeature,
},
};
use exonum_derive::{exonum_interface, interface_method};
use exonum_merkledb::ObjectHash;
use std::collections::HashSet;
use super::{
configure::ConfigureMut, migration_state::MigrationState, ArtifactError, AsyncEventState,
CommonError as SupervisorCommonError, ConfigChange, ConfigProposalWithHash, ConfigPropose,
ConfigVote, ConfigurationError, DeployRequest, DeployResult, FreezeService, MigrationError,
MigrationRequest, MigrationResult, ResumeService, SchemaImpl, ServiceError, StartService,
StopService, Supervisor, UnloadArtifact,
};
use exonum::runtime::ArtifactStatus;
/// Supervisor service transactions.
#[allow(clippy::empty_line_after_outer_attr)] // false positive
#[exonum_interface]
pub trait SupervisorInterface<Ctx> {
/// Output generated by the stub.
type Output;
/// Requests artifact deploy.
///
/// This request should be initiated by the validator (and depending on the `Supervisor`
/// mode several other actions can be required, e.g. sending the same request by majority
/// of other validators as well).
/// After that, the supervisor will try to deploy the artifact, and once this procedure
/// is completed, it will send `report_deploy_result` transaction.
#[interface_method(id = 0)]
fn request_artifact_deploy(&self, context: Ctx, artifact: DeployRequest) -> Self::Output;
/// Confirms that the artifact deployment was completed by the validator.
///
/// The artifact is registered in the dispatcher once all validators send successful confirmation.
/// This transaction is sent automatically by a validator node when the local deployment process
/// completes.
#[interface_method(id = 1)]
fn report_deploy_result(&self, context: Ctx, artifact: DeployResult) -> Self::Output;
/// Proposes config change
///
/// This request should be sent by one of validators as the proposition to change
/// current configuration to new one. All another validators are able to vote for this
/// configuration by sending `confirm_config_change` transaction.
/// The configuration application rules depend on the `Supervisor` mode, e.g. confirmations
/// are not required for the `Simple` mode, and for `Decentralized` mode (2/3+1) confirmations
/// are required.
///
/// **Note:** only one proposal at time is possible.
#[interface_method(id = 2)]
fn propose_config_change(&self, context: Ctx, propose: ConfigPropose) -> Self::Output;
/// Confirms config change
///
/// This confirm should be sent by validators to vote for proposed configuration.
/// Vote of the author of the `propose_config_change` transaction is taken into
/// account automatically.
/// The configuration application rules depend on the `Supervisor` mode.
#[interface_method(id = 3)]
fn confirm_config_change(&self, context: Ctx, vote: ConfigVote) -> Self::Output;
/// Requests the data migration.
///
/// This request should be initiated by the validator (and depending on the `Supervisor`
/// mode several other actions can be required, e.g. sending the same request by majority
/// of other validators as well).
/// After that, the core will try to perform the requested migration, and once the migration
/// is finished, supervisor will send `report_deploy_result` transaction.
#[interface_method(id = 4)]
fn request_migration(&self, context: Ctx, request: MigrationRequest) -> Self::Output;
/// Confirms that migration was completed by the validator.
///
/// The migration is applied in the core once all validators send successful confirmation.
/// This transaction is sent automatically by a validator node when the local migration process
/// completes.
#[interface_method(id = 5)]
fn report_migration_result(&self, context: Ctx, result: MigrationResult) -> Self::Output;
}
impl ConfigChange {
fn register_instance(
&self,
modified_instances: &mut HashSet<InstanceId>,
) -> Result<(), ExecutionError> {
let maybe_instance_id = match self {
Self::StopService(service) => Some(service.instance_id),
Self::FreezeService(service) => Some(service.instance_id),
Self::ResumeService(service) => Some(service.instance_id),
Self::Service(service) => Some(service.instance_id),
_ => None,
};
if let Some(instance_id) = maybe_instance_id {
if !modified_instances.insert(instance_id) {
let msg = format!(
"Discarded several actions concerning service with ID {}",
instance_id
);
return Err(ConfigurationError::malformed_propose(msg));
}
}
Ok(())
}
}
impl StartService {
fn validate(&self, context: &ExecutionContext<'_>) -> Result<(), ExecutionError> {
InstanceSpec::is_valid_name(&self.name).map_err(|e| {
let msg = format!("Service name `{}` is invalid: {}", self.name, e);
ServiceError::InvalidInstanceName.with_description(msg)
})?;
// Check that artifact is deployed and active.
let dispatcher_data = context.data().for_dispatcher();
let artifact_state = dispatcher_data
.get_artifact(&self.artifact)
.ok_or_else(|| {
let msg = format!(
"Discarded start of service `{}` from the unknown artifact `{}`.",
self.name, self.artifact,
);
ArtifactError::UnknownArtifact.with_description(msg)
})?;
if artifact_state.status != ArtifactStatus::Active {
let msg = format!(
"Discarded start of service `{}` from the non-active artifact `{}`.",
self.name, self.artifact,
);
return Err(ArtifactError::UnknownArtifact.with_description(msg));
}
// Check that there is no instance with the same name.
if dispatcher_data.get_instance(self.name.as_str()).is_some() {
return Err(ServiceError::InstanceExists.with_description(format!(
"Discarded an attempt to start of the already started instance {}.",
self.name
)));
}
Ok(())
}
}
impl StopService {
fn validate(&self, context: &ExecutionContext<'_>) -> Result<(), ExecutionError> {
validate_status(
context,
self.instance_id,
"stop",
InstanceStatus::can_be_stopped,
)
.map(drop)
}
}
impl FreezeService {
fn validate(&self, context: &ExecutionContext<'_>) -> Result<InstanceState, ExecutionError> {
validate_status(
context,
self.instance_id,
"freeze",
InstanceStatus::can_be_frozen,
)
}
}
impl ResumeService {
fn validate(&self, context: &ExecutionContext<'_>) -> Result<(), ExecutionError> {
let instance = get_instance(context, self.instance_id)?;
let status = instance.status.as_ref();
let can_be_resumed = status.map_or(false, InstanceStatus::can_be_resumed);
if !can_be_resumed {
let status = status.map_or_else(|| "none".to_owned(), ToString::to_string);
let msg = format!(
"Discarded an attempt to resume service `{}` with inappropriate status ({})",
instance.spec.name, status
);
return Err(ConfigurationError::malformed_propose(msg));
}
if instance.associated_artifact().is_none() {
let msg = format!(
"Service `{}` has data version ({}) differing from its artifact version (`{}`) \
and thus cannot be resumed",
instance.spec.name,
instance.data_version(),
instance.spec.artifact
);
return Err(ConfigurationError::malformed_propose(msg));
}
Ok(())
}
}
impl UnloadArtifact {
fn validate(&self, context: &ExecutionContext<'_>) -> Result<(), ExecutionError> {
context
.data()
.for_dispatcher()
.check_unloading_artifact(&self.artifact_id)
.map_err(|e| ConfigurationError::malformed_propose(e.description()))
}
}
/// Checks if method was called by transaction, and transaction author is a validator.
fn get_validator(context: &ExecutionContext<'_>) -> Result<PublicKey, ExecutionError> {
let author = context
.caller()
.author()
.ok_or(CommonError::UnauthorizedCaller)?;
// Verify that transaction author is validator.
context
.data()
.for_core()
.validator_id(author)
.ok_or(CommonError::UnauthorizedCaller)?;
Ok(author)
}
/// Returns the information about a service instance by its identifier.
fn get_instance(
context: &ExecutionContext<'_>,
instance_id: InstanceId,
) -> Result<InstanceState, ExecutionError> {
context
.data()
.for_dispatcher()
.get_instance(instance_id)
.ok_or_else(|| {
let msg = format!(
"Instance with ID {} is absent from the blockchain",
instance_id
);
ConfigurationError::malformed_propose(msg)
})
}
/// Checks that the current service status allows a specified transition.
fn validate_status(
context: &ExecutionContext<'_>,
instance_id: InstanceId,
action: &str,
check_fn: fn(&InstanceStatus) -> bool,
) -> Result<InstanceState, ExecutionError> {
let instance = get_instance(context, instance_id)?;
let status = instance.status.as_ref();
let is_valid_transition = status.map_or(false, check_fn);
if is_valid_transition {
Ok(instance)
} else {
let status = status.map_or_else(|| "none".to_owned(), ToString::to_string);
let msg = format!(
"Discarded an attempt to {} service `{}` with inappropriate status ({})",
action, instance.spec.name, status
);
Err(ConfigurationError::malformed_propose(msg))
}
}
/// Returns the information about a service instance by its name.
pub fn get_instance_by_name(
context: &ExecutionContext<'_>,
service: &str,
) -> Result<InstanceState, ExecutionError> {
context
.data()
.for_dispatcher()
.get_instance(service)
.ok_or_else(|| {
let msg = format!("Instance with name `{}` is absent from blockchain", service);
ConfigurationError::malformed_propose(msg)
})
}
impl SupervisorInterface<ExecutionContext<'_>> for Supervisor {
type Output = Result<(), ExecutionError>;
fn propose_config_change(
&self,
mut context: ExecutionContext<'_>,
mut propose: ConfigPropose,
) -> Self::Output {
let author = get_validator(&context)?;
let current_height = context.data().for_core().height();
// If `actual_from` field is not set, set it to the next height.
if propose.actual_from == Height(0) {
propose.actual_from = current_height.next();
} else if current_height >= propose.actual_from {
// Otherwise verify that the `actual_from` height is in the future.
let msg = format!(
"Actual height for config proposal ({}) is in the past (current height: {}).",
propose.actual_from, current_height
);
return Err(SupervisorCommonError::ActualFromIsPast.with_description(msg));
}
let mut schema = SchemaImpl::new(context.service_data());
// Verify that there are no pending config changes.
if let Some(proposal) = schema.public.pending_proposal.get() {
// We have a proposal, check that it's actual.
if current_height < proposal.config_propose.actual_from {
return Err(ConfigurationError::ConfigProposeExists.into());
} else {
// Proposal is outdated but was not removed (e.g. because of the panic
// during config applying), clean it.
schema.public.pending_proposal.remove();
}
}
drop(schema);
// Verify changes in the proposal.
Self::verify_config_changes(&mut context, &propose.changes)?;
let mut schema = SchemaImpl::new(context.service_data());
// After all the checks verify that configuration number is expected one.
let expected_config_number = schema.get_configuration_number();
if propose.configuration_number != expected_config_number {
let msg = format!(
"Number for config proposal ({}) differs from the expected one ({})",
propose.configuration_number, expected_config_number
);
return Err(ConfigurationError::IncorrectConfigurationNumber.with_description(msg));
}
schema.increase_configuration_number();
let propose_hash = propose.object_hash();
schema.config_confirms.confirm(&propose_hash, author);
let config_entry = ConfigProposalWithHash {
config_propose: propose,
propose_hash,
};
schema.public.pending_proposal.set(config_entry);
Ok(())
}
fn confirm_config_change(
&self,
context: ExecutionContext<'_>,
vote: ConfigVote,
) -> Self::Output {
let author = get_validator(&context)?;
let core_schema = context.data().for_core();
let mut schema = SchemaImpl::new(context.service_data());
let entry = schema
.public
.pending_proposal
.get()
.ok_or(ConfigurationError::ConfigProposeNotRegistered)?;
// Verify that this config proposal is registered.
if entry.propose_hash != vote.propose_hash {
let msg = format!(
"Mismatch between the hash of the saved proposal ({}) and the hash \
referenced in the vote ({})",
entry.propose_hash, vote.propose_hash
);
return Err(ConfigurationError::ConfigProposeNotRegistered.with_description(msg));
}
// Verify that we didn't reach the deadline height.
let config_propose = entry.config_propose;
let current_height = core_schema.height();
if config_propose.actual_from <= current_height {
let msg = format!(
"Deadline height ({}) exceeded for the config proposal ({}); \
voting for it is impossible",
config_propose.actual_from, current_height
);
return Err(SupervisorCommonError::DeadlineExceeded.with_description(msg));
}
let already_confirmed = schema
.config_confirms
.confirmed_by(&entry.propose_hash, &author);
if already_confirmed {
return Err(ConfigurationError::AttemptToVoteTwice.into());
}
schema.config_confirms.confirm(&vote.propose_hash, author);
log::trace!(
"Propose config {:?} has been confirmed by {:?}",
vote.propose_hash,
author
);
Ok(())
}
fn request_artifact_deploy(
&self,
context: ExecutionContext<'_>,
deploy: DeployRequest,
) -> Self::Output {
// Verify that transaction author is validator.
let author = get_validator(&context)?;
deploy.artifact.validate().map_err(|e| {
let msg = format!(
"Artifact identifier `{}` is invalid: {}",
deploy.artifact, e
);
ArtifactError::InvalidArtifactId.with_description(msg)
})?;
// Check that we didn't reach the deadline height.
let core_schema = context.data().for_core();
let current_height = core_schema.height();
if deploy.deadline_height < current_height {
return Err(SupervisorCommonError::ActualFromIsPast.into());
}
let mut schema = SchemaImpl::new(context.service_data());
// Verify that the artifact is not deployed yet.
let is_deployed = context
.data()
.for_dispatcher()
.get_artifact(&deploy.artifact)
.is_some();
if is_deployed {
let msg = format!("Artifact `{}` is already deployed", deploy.artifact);
return Err(ArtifactError::AlreadyDeployed.with_description(msg));
}
// If deployment is already registered, check whether the request is new.
if schema.pending_deployments.contains(&deploy.artifact) {
let new_confirmation = !schema.deploy_requests.confirmed_by(&deploy, &author);
return if new_confirmation {
// It's OK, just an additional confirmation.
schema.deploy_requests.confirm(&deploy, author);
Ok(())
} else {
// Author already confirmed deployment of this artifact, so it's a duplicate.
let msg = format!(
"Deploy of artifact `{}` is already confirmed by validator {}",
deploy.artifact, author
);
Err(ArtifactError::DeployRequestAlreadyRegistered.with_description(msg))
};
}
schema.deploy_requests.confirm(&deploy, author);
let supervisor_mode = schema.supervisor_config().mode;
let validator_count = core_schema.consensus_config().validator_keys.len();
if supervisor_mode.deploy_approved(&deploy, &schema.deploy_requests, validator_count) {
schema.deploy_states.put(&deploy, AsyncEventState::Pending);
log::trace!("Deploy artifact request accepted {:?}", deploy.artifact);
let artifact = deploy.artifact.clone();
schema.pending_deployments.put(&artifact, deploy);
}
Ok(())
}
fn report_deploy_result(
&self,
context: ExecutionContext<'_>,
deploy_result: DeployResult,
) -> Self::Output {
// Verify that transaction author is validator.
let author = get_validator(&context)?;
let core_schema = context.data().for_core();
let current_height = core_schema.height();
let schema = SchemaImpl::new(context.service_data());
// Check if deployment already failed.
if schema
.deploy_states
.get(&deploy_result.request)
.map_or(false, |state| state.is_failed())
{
// This deployment is already resulted in failure, no further
// processing needed.
return Ok(());
}
// Verify that this deployment is registered.
let deploy_request = schema
.pending_deployments
.get(&deploy_result.request.artifact)
.ok_or_else(|| {
let msg = format!(
"Deploy of artifact `{}` is not registered; reporting its result is impossible",
deploy_result.request.artifact
);
ArtifactError::DeployRequestNotRegistered.with_description(msg)
})?;
// Check that pending deployment is the same as in confirmation.
if deploy_request != deploy_result.request {
let msg = format!(
"Mismatch between the recorded deploy request for artifact `{}` and the request \
mentioned in the deploy report",
deploy_result.request.artifact
);
return Err(ArtifactError::DeployRequestNotRegistered.with_description(msg));
}
// Verify that we didn't reach deadline height.
if deploy_request.deadline_height < current_height {
let msg = format!(
"Deadline height ({}) exceeded for the deploy request ({}); \
reporting deploy result is impossible",
deploy_request.deadline_height, current_height
);
return Err(SupervisorCommonError::DeadlineExceeded.with_description(msg));
}
drop(schema);
match deploy_result.result.0 {
Ok(()) => Self::confirm_deploy(context, deploy_request, author)?,
Err(error) => Self::fail_deploy(&context, &deploy_request, error),
}
Ok(())
}
fn request_migration(
&self,
mut context: ExecutionContext<'_>,
request: MigrationRequest,
) -> Self::Output {
// Verify that transaction author is validator.
let author = get_validator(&context)?;
// Check that target instance exists.
let instance = get_instance_by_name(&context, &request.service)?;
let core_schema = context.data().for_core();
let validator_count = core_schema.consensus_config().validator_keys.len();
// Check that we didn't reach the deadline height.
let current_height = core_schema.height();
if request.deadline_height < current_height {
let msg = format!(
"Deadline height ({}) for the migration request is in the past (current height: {})",
request.deadline_height, current_height
);
return Err(SupervisorCommonError::ActualFromIsPast.with_description(msg));
}
let mut schema = SchemaImpl::new(context.service_data());
schema.migration_requests.confirm(&request, author);
let supervisor_mode = schema.supervisor_config().mode;
let migration_approved = supervisor_mode.migration_approved(
&request,
&schema.migration_requests,
validator_count,
);
if migration_approved {
log::trace!(
"Migration request for instance {} accepted",
request.service
);
// Store initial state of the request.
let mut state =
MigrationState::new(AsyncEventState::Pending, instance.data_version().clone());
schema.migration_states.put(&request, state.clone());
// Store the migration as pending. It will be removed in `before_transactions` hook
// once the migration will be completed (either successfully or unsuccessfully).
schema.pending_migrations.insert(request.clone());
// Finally, request core to start the migration.
// If migration initialization will fail now, it won't be a transaction execution error,
// since migration failure is one of possible outcomes of migration process. Instead of
// returning an error, we will just mark this migration as failed.
drop(schema);
let supervisor_extensions = context.supervisor_extensions();
let result = supervisor_extensions
.initiate_migration(request.new_artifact.clone(), &request.service);
// Check whether migration started successfully.
let migration_type = match result {
Ok(ty) => ty,
Err(error) => {
// Migration failed even before start, softly mark it as failed.
let initiate_rollback = false;
return Self::fail_migration(context, &request, error, initiate_rollback);
}
};
if let MigrationType::FastForward = migration_type {
// Migration is fast-forward, complete it immediately.
// No agreement needed, since nodes which will behave differently will obtain
// different blockchain state hash and will be excluded from consensus.
log::trace!("Applied fast-forward migration with request {:?}", request);
let new_version = request.new_artifact.version.clone();
let mut schema = SchemaImpl::new(context.service_data());
// Update the state of a migration.
state.update(AsyncEventState::Succeed, new_version);
schema.migration_states.put(&request, state);
// Remove the migration from the list of pending.
schema.pending_migrations.remove(&request);
}
}
Ok(())
}
fn report_migration_result(
&self,
context: ExecutionContext<'_>,
result: MigrationResult,
) -> Self::Output {
// Verifies that transaction author is validator.
let author = get_validator(&context)?;
let core_schema = context.data().for_core();
let current_height = core_schema.height();
let schema = SchemaImpl::new(context.service_data());
// Verify that this migration is registered.
let state = schema
.migration_states
.get(&result.request)
.ok_or_else(|| {
let msg = format!(
"Migration request {:?} is not registered; impossible to process its result",
result.request
);
MigrationError::MigrationRequestNotRegistered.with_description(msg)
})?;
// Check if migration already failed.
if state.is_failed() {
// This migration is already resulted in failure, no further
// processing needed.
return Ok(());
}
// Verify that we didn't reach deadline height.
if result.request.deadline_height < current_height {
let msg = format!(
"Deadline height ({}) exceeded for the migration request ({}); \
reporting its result is impossible",
result.request.deadline_height, current_height
);
return Err(SupervisorCommonError::DeadlineExceeded.with_description(msg));
}
drop(schema);
match result.status.0 {
Ok(hash) => Self::confirm_migration(context, &result.request, hash, author),
Err(error) => {
// Since the migration process error is represented as a string rather than
// `ExecutionError`, we use our service error code, but set the description
// to the actual error.
let fail_cause =
ExecutionError::service(MigrationError::MigrationFailed as u8, error);
let initiate_rollback = true;
Self::fail_migration(context, &result.request, fail_cause, initiate_rollback)
}
}
}
}
impl Supervisor {
/// Verifies that each change introduced within config proposal is valid.
fn verify_config_changes(
context: &mut ExecutionContext<'_>,
changes: &[ConfigChange],
) -> Result<(), ExecutionError> {
// To prevent multiple consensus change proposition in one request
let mut consensus_propose_added = false;
// To prevent multiple service change proposition in one request
let mut modified_instances = HashSet::new();
// To prevent multiple services start in one request.
let mut services_to_start = HashSet::new();
// To prevent starting services with an unloaded artifact.
let mut artifacts_for_started_services = HashSet::new();
let mut unloaded_artifacts = HashSet::new();
// Perform config verification.
for change in changes {
change.register_instance(&mut modified_instances)?;
match change {
ConfigChange::Consensus(config) => {
if consensus_propose_added {
let msg = "Discarded multiple consensus change proposals in one request";
return Err(ConfigurationError::malformed_propose(msg));
}
consensus_propose_added = true;
config
.validate()
.map_err(ConfigurationError::malformed_propose)?;
}
ConfigChange::Service(config) => {
context.verify_config(config.instance_id, config.params.clone())?;
}
ConfigChange::StartService(start_service) => {
if !services_to_start.insert(&start_service.name) {
let msg = format!(
"Discarded multiple starts of service `{}`",
start_service.name
);
return Err(ConfigurationError::malformed_propose(msg));
}
artifacts_for_started_services.insert(&start_service.artifact);
start_service.validate(context)?;
}
ConfigChange::StopService(stop_service) => {
stop_service.validate(context)?;
}
ConfigChange::ResumeService(resume_service) => {
resume_service.validate(context)?;
}
ConfigChange::FreezeService(freeze_service) => {
let instance_state = freeze_service.validate(context)?;
let runtime_id = instance_state.spec.artifact.runtime_id;
if !context
.supervisor_extensions()
.check_feature(runtime_id, &RuntimeFeature::FreezingServices)
{
let msg = format!(
"Cannot freeze service `{}`: runtime with ID {}, with which \
its artifact `{}` is associated, does not support service freezing",
instance_state.spec.as_descriptor(),
runtime_id,
instance_state.spec.artifact,
);
return Err(ConfigurationError::malformed_propose(msg));
}
}
ConfigChange::UnloadArtifact(unload_artifact) => {
if !unloaded_artifacts.insert(&unload_artifact.artifact_id) {
let msg = format!(
"Discarded multiple unloads of artifact `{}`",
unload_artifact.artifact_id
);
return Err(ConfigurationError::malformed_propose(msg));
}
unload_artifact.validate(context)?;
}
}
}
let mut intersection = unloaded_artifacts.intersection(&artifacts_for_started_services);
if let Some(&artifact) = intersection.next() {
let msg = format!(
"Discarded proposal which both starts a service from artifact `{}` and unloads it",
artifact
);
return Err(ConfigurationError::malformed_propose(msg));
}
Ok(())
}
/// Confirms a deploy by the given author's public key and checks
/// if all the confirmations are collected. If so, starts the artifact registration.
fn confirm_deploy(
mut context: ExecutionContext<'_>,
deploy_request: DeployRequest,
author: PublicKey,
) -> Result<(), ExecutionError> {
let core_schema = context.data().for_core();
let mut schema = SchemaImpl::new(context.service_data());
schema.deploy_confirmations.confirm(&deploy_request, author);
// Check if we have enough confirmations for the deployment.
let config = core_schema.consensus_config();
let validator_keys = config.validator_keys.iter().map(|keys| keys.service_key);
if schema
.deploy_confirmations
.intersect_with_validators(&deploy_request, validator_keys)
{
log::trace!(
"Registering deployed artifact in dispatcher {:?}",
deploy_request.artifact
);
// Remove artifact from pending deployments.
schema
.deploy_states
.put(&deploy_request, AsyncEventState::Succeed);
drop(schema);
// We have enough confirmations to register the deployed artifact in the dispatcher;
// if this action fails, this transaction will be canceled.
context
.supervisor_extensions()
.start_artifact_registration(&deploy_request.artifact, deploy_request.spec);
}
Ok(())
}
/// Marks deployment as failed, discarding the further deployment steps.
fn fail_deploy(
context: &ExecutionContext<'_>,
deploy_request: &DeployRequest,
error: ExecutionError,
) {
log::warn!(
"Deploying artifact for request {:?} failed. Reason: {}",
deploy_request,
error
);
let height = context.data().for_core().height();
let mut schema = SchemaImpl::new(context.service_data());
// Mark deploy as failed.
schema
.deploy_states
.put(deploy_request, AsyncEventState::Failed { height, error });
// Remove artifact from pending deployments: since we require
// a confirmation from every node, failure for one node means failure
// for the whole network.
schema.pending_deployments.remove(&deploy_request.artifact);
}
/// Confirms a local migration success by the given author's public key and checks
/// if all the confirmations are collected. If so, commits the migration.
/// If migration state hash differs from the expected one, migration fails though,
/// and `fail_migration` method is invoked.
fn confirm_migration(
mut context: ExecutionContext<'_>,
request: &MigrationRequest,
state_hash: Hash,
author: PublicKey,
) -> Result<(), ExecutionError> {
let core_schema = context.data().for_core();
let mut schema = SchemaImpl::new(context.service_data());
let mut state = schema.migration_state_unchecked(request);
// Verify that state hash does match expected one.
if let Err(error) = state.add_state_hash(state_hash) {
// Hashes do not match, rollback the migration.
drop(schema); // Required for the context reborrow in `fail_migration`.
let initiate_rollback = true;
return Self::fail_migration(context, request, error, initiate_rollback);
}
// Hash is OK, process further.
// Update state and add a confirmation.
schema.migration_states.put(request, state.clone());
schema.migration_confirmations.confirm(request, author);
// Check if we have enough confirmations to finish the migration.
let consensus_config = core_schema.consensus_config();
let validator_keys = consensus_config
.validator_keys
.iter()
.map(|keys| keys.service_key);
if schema
.migration_confirmations
.intersect_with_validators(request, validator_keys)
{
log::trace!(
"Confirming commit of migration request {:?}. Result state hash: {:?}",
request,
state_hash
);
// Schedule migration for a flush.
// Migration will be flushed and marked as succeed in `before_transactions`
// hook of the next block.
schema.migration_states.put(request, state);
schema.pending_migrations.remove(request);
schema.migrations_to_flush.insert(request.clone());
drop(schema);
// Commit the migration.
let supervisor_extensions = context.supervisor_extensions();
supervisor_extensions.commit_migration(&request.service, state_hash)?;
}
Ok(())
}
/// Marks migration as failed, discarding the further migration steps.
/// If `initiate_rollback` argument is `true`, ongoing migration will
/// be rolled back after the invocation of this method.
/// This argument is required, since migration can fail on the init step.
fn fail_migration(
mut context: ExecutionContext<'_>,
request: &MigrationRequest,
error: ExecutionError,
initiate_rollback: bool,
) -> Result<(), ExecutionError> {
if initiate_rollback {
log::warn!(
"Migration for a request {:?} failed. Reason: {}. \
This migration is going to be rolled back.",
request,
error
);
} else {
log::warn!(
"Migration for a request {:?} failed to start. Reason: {}.",
request,
error
);
}
let height = context.data().for_core().height();
let mut schema = SchemaImpl::new(context.service_data());
// Mark deploy as failed.
let mut state = schema.migration_state_unchecked(request);
state.fail(AsyncEventState::Failed { height, error });
schema.migration_states.put(request, state);
// Migration is not pending anymore, remove it.
schema.pending_migrations.remove(request);
// Rollback the migration.
drop(schema);
if initiate_rollback {
context
.supervisor_extensions()
.rollback_migration(&request.service)?;
}
Ok(())
}
}
| 40.258098 | 102 | 0.600955 |
75732ff26e39bc674e2d7999c41efea8cd064b6a | 3,354 | use super::tuple::Tuple;
use crate::astype::*;
use convert_case::{Case, Casing};
pub trait IsNullable {
fn is_nullable(&self) -> bool;
}
impl IsNullable for ASType {
fn is_nullable(&self) -> bool {
matches!(
self,
ASType::ConstPtr(_)
| ASType::MutPtr(_)
| ASType::ReadBuffer(_)
| ASType::WriteBuffer(_)
| ASType::Enum(_)
| ASType::Struct(_)
| ASType::Tuple(_)
| ASType::Union(_)
)
}
}
pub trait Normalize {
fn as_str(&self) -> &str;
fn as_type(&self) -> String {
self.as_str().to_case(Case::Pascal)
}
fn as_fn(&self) -> String {
self.as_str().to_case(Case::Camel)
}
fn as_fn_suffix(&self) -> String {
self.as_str().to_case(Case::UpperCamel)
}
fn as_var(&self) -> String {
self.as_str().to_case(Case::Snake)
}
fn as_const(&self) -> String {
self.as_str().to_case(Case::UpperSnake)
}
fn as_namespace(&self) -> String {
self.as_str().to_string().to_case(Case::Pascal)
}
}
impl<T: AsRef<str>> Normalize for T {
fn as_str(&self) -> &str {
self.as_ref()
}
}
pub trait ToLanguageRepresentation {
fn as_astype(&self) -> &ASType;
fn to_string(&self) -> String {
self.as_lang()
}
fn as_lang(&self) -> String {
match self.as_astype() {
ASType::Alias(alias) => alias.name.as_type(),
ASType::Bool => "bool".to_string(),
ASType::Char32 => "Char32".to_string(),
ASType::Char8 => "Char8".to_string(),
ASType::F32 => "f32".to_string(),
ASType::F64 => "f64".to_string(),
ASType::Handle(_resource_name) => "WasiHandle".to_string(),
ASType::ConstPtr(pointee) => format!("WasiPtr({})", pointee.to_string()),
ASType::MutPtr(pointee) => format!("WasiMutPtr({})", pointee.to_string()),
ASType::Option(_) => todo!(),
ASType::Result(_) => todo!(),
ASType::S8 => "i8".to_string(),
ASType::S16 => "i16".to_string(),
ASType::S32 => "i32".to_string(),
ASType::S64 => "i64".to_string(),
ASType::U8 => "u8".to_string(),
ASType::U16 => "u16".to_string(),
ASType::U32 => "u32".to_string(),
ASType::U64 => "u64".to_string(),
ASType::USize => "usize".to_string(),
ASType::Void => "()".to_string(),
ASType::Constants(_) => unimplemented!(),
ASType::Enum(enum_) => enum_.repr.as_ref().as_lang(),
ASType::Struct(_) => unimplemented!(),
ASType::Tuple(tuple_members) => Tuple::name_for(tuple_members).as_type(),
ASType::Union(_) => unimplemented!(),
ASType::Slice(element_type) => format!("WasiMutSlice({})", element_type.as_lang()),
ASType::String(_) => "WasiString".to_string(),
ASType::ReadBuffer(element_type) => format!("WasiSlice({})", element_type.as_lang()),
ASType::WriteBuffer(element_type) => {
format!("WasiMutSlice({})", element_type.to_string())
}
}
}
}
impl ToLanguageRepresentation for ASType {
fn as_astype(&self) -> &ASType {
self
}
}
| 30.770642 | 97 | 0.525045 |
724023200b1a7b46ca54975c09ffdd9f71848621 | 312 | // This is the default implementation for basic pbr material.
#[derive(Copy, Clone, Debug, bytemuck::Zeroable, bytemuck::Pod)]
#[repr(C)]
pub struct PBRStandardMaterial {
albedo: [f32; 3],
metallic: f32,
roughness: f32,
index_of_refraction: f32,
ambient_occlusion: f32,
emmisive: f32,
}
| 24 | 64 | 0.689103 |
11b01eae1c64c681777c31413f8cab1ea73ec68e | 12,573 | // Copyright 2022 The Engula Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod acl;
mod eval;
pub mod fsm;
pub mod job;
pub mod raft;
use std::{
sync::{atomic::AtomicI32, Arc, Mutex},
task::{Poll, Waker},
};
use engula_api::{
server::v1::{
group_request_union::Request, group_response_union::Response, BatchWriteResponse,
ChangeReplicasResponse, CreateShardResponse, GroupDesc, GroupRequest, GroupResponse,
RaftRole, ReplicaDesc, ReplicaState,
},
v1::{DeleteResponse, GetResponse, PutResponse},
};
pub use self::raft::RaftNodeFacade as RaftSender;
use self::{
fsm::{DescObserver, GroupStateMachine},
raft::{RaftManager, RaftNodeFacade, StateObserver},
};
use super::{group_engine::GroupEngine, job::StateChannel};
use crate::{
serverpb::v1::{EvalResult, ReplicaLocalState, SyncOp},
Error, Result,
};
pub struct ReplicaInfo {
pub replica_id: u64,
pub group_id: u64,
local_state: AtomicI32,
}
#[derive(Default)]
struct LeaseState {
replica_state: ReplicaState,
descriptor: GroupDesc,
leader_subscribers: Vec<Waker>,
}
/// A struct that observes changes to `GroupDesc` and `ReplicaState` , and broadcasts those changes
/// while saving them to `LeaseState`.
#[derive(Clone)]
struct LeaseStateObserver {
info: Arc<ReplicaInfo>,
lease_state: Arc<Mutex<LeaseState>>,
state_channel: StateChannel,
}
pub struct Replica
where
Self: Send,
{
info: Arc<ReplicaInfo>,
group_engine: GroupEngine,
raft_node: RaftNodeFacade,
lease_state: Arc<Mutex<LeaseState>>,
}
impl Replica {
/// Create new instance of the specified raft node.
pub async fn create(
replica_id: u64,
target_desc: &GroupDesc,
raft_mgr: &RaftManager,
) -> Result<()> {
let voters = target_desc
.replicas
.iter()
.map(|r| (r.id, r.node_id))
.collect::<Vec<_>>();
let eval_results = target_desc
.shards
.iter()
.cloned()
.map(eval::add_shard)
.collect::<Vec<_>>();
raft::write_initial_state(raft_mgr.engine(), replica_id, voters, eval_results).await?;
Ok(())
}
/// Open the existed replica of raft group.
pub async fn recover(
group_id: u64,
desc: ReplicaDesc,
local_state: ReplicaLocalState,
state_channel: StateChannel,
group_engine: GroupEngine,
raft_mgr: &RaftManager,
) -> Result<Self> {
let info = Arc::new(ReplicaInfo::new(desc.id, group_id, local_state));
let lease_state = Arc::new(Mutex::new(LeaseState {
descriptor: group_engine.descriptor().unwrap(),
..Default::default()
}));
let state_observer = Box::new(LeaseStateObserver::new(
info.clone(),
lease_state.clone(),
state_channel,
));
let fsm = GroupStateMachine::new(group_engine.clone(), state_observer.clone());
let raft_node = raft_mgr
.start_raft_group(group_id, desc, fsm, state_observer)
.await?;
Ok(Replica {
info,
group_engine,
raft_node,
lease_state,
})
}
/// Shutdown this replicas with the newer `GroupDesc`.
pub async fn shutdown(&self, _actual_desc: &GroupDesc) -> Result<()> {
// TODO(walter) check actual desc.
self.info.terminate();
{
let mut lease_state = self.lease_state.lock().unwrap();
lease_state.wake_all_waiters();
}
// TODO(walter) blocks until all asynchronously task finished.
Ok(())
}
}
impl Replica {
/// Execute group request and fill response.
pub async fn execute(&self, group_request: &GroupRequest) -> Result<GroupResponse> {
if self.info.is_terminated() {
return Err(Error::GroupNotFound(self.info.group_id));
}
// TODO(walter) check request epoch.
let group_id = group_request.group_id;
let shard_id = group_request.shard_id;
debug_assert_eq!(group_id, self.info.group_id);
let request = group_request
.request
.as_ref()
.and_then(|request| request.request.as_ref())
.ok_or_else(|| Error::InvalidArgument("GroupRequest::request".into()))?;
self.check_request_early(group_id, request)?;
let resp = self.evaluate_command(shard_id, request).await?;
Ok(GroupResponse::new(resp))
}
pub async fn on_leader(&self) -> Result<()> {
if self.info.is_terminated() {
return Err(Error::NotLeader(self.info.group_id, None));
}
use futures::future::poll_fn;
poll_fn(|ctx| {
let mut lease_state = self.lease_state.lock().unwrap();
if lease_state.still_valid() {
Poll::Ready(())
} else {
lease_state.leader_subscribers.push(ctx.waker().clone());
Poll::Pending
}
})
.await;
// FIXME(walter) support shutdown a replica.
Ok(())
}
/// Propose `SyncOp` to raft log.
pub(super) async fn propose_sync_op(&self, op: SyncOp) -> Result<()> {
self.check_leader_early()?;
let eval_result = EvalResult {
op: Some(op),
..Default::default()
};
self.raft_node.clone().propose(eval_result).await??;
Ok(())
}
#[inline]
pub fn replica_info(&self) -> Arc<ReplicaInfo> {
self.info.clone()
}
#[inline]
pub fn raft_node(&self) -> RaftNodeFacade {
self.raft_node.clone()
}
}
impl Replica {
/// Delegates the eval method for the given `Request`.
async fn evaluate_command(&self, shard_id: u64, request: &Request) -> Result<Response> {
let resp: Response;
let eval_result_opt = match &request {
Request::Get(get) => {
let value = eval::get(&self.group_engine, shard_id, &get.key).await?;
resp = Response::Get(GetResponse {
value: value.map(|v| v.to_vec()),
});
None
}
Request::Put(req) => {
resp = Response::Put(PutResponse {});
let eval_result =
eval::put(&self.group_engine, shard_id, &req.key, &req.value).await?;
Some(eval_result)
}
Request::Delete(req) => {
resp = Response::Delete(DeleteResponse {});
let eval_result = eval::delete(&self.group_engine, shard_id, &req.key).await?;
Some(eval_result)
}
Request::BatchWrite(req) => {
resp = Response::BatchWrite(BatchWriteResponse {});
eval::batch_write(&self.group_engine, shard_id, req).await?
}
Request::CreateShard(req) => {
// TODO(walter) check the existing of shard.
let shard = req
.shard
.as_ref()
.cloned()
.ok_or_else(|| Error::InvalidArgument("CreateShard::shard".into()))?;
resp = Response::CreateShard(CreateShardResponse {});
Some(eval::add_shard(shard))
}
Request::ChangeReplicas(req) => {
if let Some(change) = &req.change_replicas {
self.raft_node
.clone()
.change_config(change.clone())
.await??;
}
return Ok(Response::ChangeReplicas(ChangeReplicasResponse {}));
}
};
if let Some(eval_result) = eval_result_opt {
self.propose(eval_result).await?;
}
Ok(resp)
}
async fn propose(&self, eval_result: EvalResult) -> Result<()> {
self.raft_node.clone().propose(eval_result).await??;
Ok(())
}
fn check_request_early(&self, group_id: u64, _request: &Request) -> Result<()> {
let lease_state = self.lease_state.lock().unwrap();
if !lease_state.still_valid() {
Err(Error::NotLeader(group_id, None))
} else {
Ok(())
}
}
fn check_leader_early(&self) -> Result<()> {
let lease_state = self.lease_state.lock().unwrap();
if !lease_state.still_valid() {
Err(Error::NotLeader(self.info.group_id, None))
} else {
Ok(())
}
}
}
impl ReplicaInfo {
pub fn new(replica_id: u64, group_id: u64, local_state: ReplicaLocalState) -> Self {
ReplicaInfo {
replica_id,
group_id,
local_state: AtomicI32::new(local_state.into()),
}
}
#[inline]
pub fn local_state(&self) -> ReplicaLocalState {
use std::sync::atomic::Ordering;
ReplicaLocalState::from_i32(self.local_state.load(Ordering::Acquire)).unwrap()
}
#[inline]
pub fn is_terminated(&self) -> bool {
self.local_state() == ReplicaLocalState::Terminated
}
#[inline]
pub fn terminate(&self) {
use std::sync::atomic::Ordering;
const TERMINATED: i32 = ReplicaLocalState::Terminated as i32;
let mut local_state: i32 = self.local_state().into();
while local_state == TERMINATED {
local_state = self
.local_state
.compare_exchange(
local_state,
TERMINATED,
Ordering::Release,
Ordering::Relaxed,
)
.into_ok_or_err();
}
}
}
impl LeaseState {
#[inline]
fn still_valid(&self) -> bool {
self.replica_state.role == RaftRole::Leader.into()
}
#[inline]
fn wake_all_waiters(&mut self) {
for waker in std::mem::take(&mut self.leader_subscribers) {
waker.wake();
}
}
}
impl LeaseStateObserver {
fn new(
info: Arc<ReplicaInfo>,
lease_state: Arc<Mutex<LeaseState>>,
state_channel: StateChannel,
) -> Self {
LeaseStateObserver {
info,
lease_state,
state_channel,
}
}
fn update_replica_state(
&self,
voted_for: u64,
term: u64,
role: RaftRole,
) -> (ReplicaState, Option<GroupDesc>) {
let replica_state = ReplicaState {
replica_id: self.info.replica_id,
group_id: self.info.group_id,
term,
voted_for,
role: role.into(),
};
let mut lease_state = self.lease_state.lock().unwrap();
lease_state.replica_state = replica_state.clone();
let desc = if role == RaftRole::Leader {
lease_state.wake_all_waiters();
Some(lease_state.descriptor.clone())
} else {
None
};
(replica_state, desc)
}
fn update_descriptor(&self, descriptor: GroupDesc) -> bool {
let mut lease_state = self.lease_state.lock().unwrap();
lease_state.descriptor = descriptor;
lease_state.replica_state.role == RaftRole::Leader.into()
}
}
impl StateObserver for LeaseStateObserver {
fn on_state_updated(&mut self, _leader_id: u64, voted_for: u64, term: u64, role: RaftRole) {
let (state, desc) = self.update_replica_state(voted_for, term, role);
self.state_channel
.broadcast_replica_state(self.info.group_id, state);
if let Some(desc) = desc {
self.state_channel
.broadcast_group_descriptor(self.info.group_id, desc);
}
}
}
impl DescObserver for LeaseStateObserver {
fn on_descriptor_updated(&mut self, descriptor: GroupDesc) {
if self.update_descriptor(descriptor.clone()) {
self.state_channel
.broadcast_group_descriptor(self.info.group_id, descriptor);
}
}
}
| 30.223558 | 99 | 0.571781 |
5027ca05b6f47c991e64ea10c08d84c5dffe94d1 | 22,938 | use super::system_param::FetchSystemParam;
use crate::{
ArchetypeComponent, Commands, QueryAccess, Resources, System, SystemId, SystemParam,
ThreadLocalExecution, TypeAccess, World,
};
use parking_lot::Mutex;
use std::{any::TypeId, borrow::Cow, cell::UnsafeCell, sync::Arc};
pub struct SystemState {
pub(crate) id: SystemId,
pub(crate) name: Cow<'static, str>,
pub(crate) archetype_component_access: TypeAccess<ArchetypeComponent>,
pub(crate) resource_access: TypeAccess<TypeId>,
pub(crate) local_resource_access: TypeAccess<TypeId>,
pub(crate) query_archetype_component_accesses: Vec<TypeAccess<ArchetypeComponent>>,
pub(crate) query_accesses: Vec<Vec<QueryAccess>>,
pub(crate) query_type_names: Vec<&'static str>,
pub(crate) commands: UnsafeCell<Commands>,
pub(crate) arc_commands: Option<Arc<Mutex<Commands>>>,
pub(crate) current_query_index: UnsafeCell<usize>,
}
// SAFE: UnsafeCell<Commands> and UnsafeCell<usize> only accessed from the thread they are scheduled on
unsafe impl Sync for SystemState {}
impl SystemState {
pub fn reset_indices(&mut self) {
// SAFE: done with unique mutable access to Self
unsafe {
*self.current_query_index.get() = 0;
}
}
pub fn update(&mut self, world: &World) {
self.archetype_component_access.clear();
let mut conflict_index = None;
let mut conflict_name = None;
for (i, (query_accesses, component_access)) in self
.query_accesses
.iter()
.zip(self.query_archetype_component_accesses.iter_mut())
.enumerate()
{
component_access.clear();
for query_access in query_accesses.iter() {
query_access.get_world_archetype_access(world, Some(component_access));
}
if !component_access.is_compatible(&self.archetype_component_access) {
conflict_index = Some(i);
conflict_name = component_access
.get_conflict(&self.archetype_component_access)
.and_then(|archetype_component| {
query_accesses
.iter()
.filter_map(|query_access| {
query_access.get_type_name(archetype_component.component)
})
.next()
});
break;
}
self.archetype_component_access.union(component_access);
}
if let Some(conflict_index) = conflict_index {
let mut conflicts_with_index = None;
for prior_index in 0..conflict_index {
if !self.query_archetype_component_accesses[conflict_index]
.is_compatible(&self.query_archetype_component_accesses[prior_index])
{
conflicts_with_index = Some(prior_index);
}
}
panic!("System {} has conflicting queries. {} conflicts with the component access [{}] in this prior query: {}.",
self.name,
self.query_type_names[conflict_index],
conflict_name.unwrap_or("Unknown"),
conflicts_with_index.map(|index| self.query_type_names[index]).unwrap_or("Unknown"));
}
}
}
pub struct FuncSystem<Out> {
func:
Box<dyn FnMut(&mut SystemState, &World, &Resources) -> Option<Out> + Send + Sync + 'static>,
thread_local_func:
Box<dyn FnMut(&mut SystemState, &mut World, &mut Resources) + Send + Sync + 'static>,
init_func: Box<dyn FnMut(&mut SystemState, &World, &mut Resources) + Send + Sync + 'static>,
state: SystemState,
}
impl<Out: 'static> System for FuncSystem<Out> {
type In = ();
type Out = Out;
fn name(&self) -> std::borrow::Cow<'static, str> {
self.state.name.clone()
}
fn id(&self) -> SystemId {
self.state.id
}
fn update(&mut self, world: &World) {
self.state.update(world);
}
fn archetype_component_access(&self) -> &TypeAccess<ArchetypeComponent> {
&self.state.archetype_component_access
}
fn resource_access(&self) -> &TypeAccess<std::any::TypeId> {
&self.state.resource_access
}
fn thread_local_execution(&self) -> ThreadLocalExecution {
ThreadLocalExecution::NextFlush
}
unsafe fn run_unsafe(
&mut self,
_input: Self::In,
world: &World,
resources: &Resources,
) -> Option<Out> {
(self.func)(&mut self.state, world, resources)
}
fn run_thread_local(&mut self, world: &mut World, resources: &mut Resources) {
(self.thread_local_func)(&mut self.state, world, resources)
}
fn initialize(&mut self, world: &mut World, resources: &mut Resources) {
(self.init_func)(&mut self.state, world, resources);
}
}
pub struct InputFuncSystem<In, Out> {
func: Box<
dyn FnMut(In, &mut SystemState, &World, &Resources) -> Option<Out> + Send + Sync + 'static,
>,
thread_local_func:
Box<dyn FnMut(&mut SystemState, &mut World, &mut Resources) + Send + Sync + 'static>,
init_func: Box<dyn FnMut(&mut SystemState, &World, &mut Resources) + Send + Sync + 'static>,
state: SystemState,
}
impl<In: 'static, Out: 'static> System for InputFuncSystem<In, Out> {
type In = In;
type Out = Out;
fn name(&self) -> std::borrow::Cow<'static, str> {
self.state.name.clone()
}
fn id(&self) -> SystemId {
self.state.id
}
fn update(&mut self, world: &World) {
self.state.update(world);
}
fn archetype_component_access(&self) -> &TypeAccess<ArchetypeComponent> {
&self.state.archetype_component_access
}
fn resource_access(&self) -> &TypeAccess<std::any::TypeId> {
&self.state.resource_access
}
fn thread_local_execution(&self) -> ThreadLocalExecution {
ThreadLocalExecution::NextFlush
}
unsafe fn run_unsafe(
&mut self,
input: In,
world: &World,
resources: &Resources,
) -> Option<Out> {
(self.func)(input, &mut self.state, world, resources)
}
fn run_thread_local(&mut self, world: &mut World, resources: &mut Resources) {
(self.thread_local_func)(&mut self.state, world, resources)
}
fn initialize(&mut self, world: &mut World, resources: &mut Resources) {
(self.init_func)(&mut self.state, world, resources);
}
}
pub trait IntoSystem<Params, SystemType: System> {
fn system(self) -> SystemType;
}
// Systems implicitly implement IntoSystem
impl<Sys: System> IntoSystem<(), Sys> for Sys {
fn system(self) -> Sys {
self
}
}
pub struct In<In>(pub In);
macro_rules! impl_into_system {
($($param: ident),*) => {
impl<Func, Out, $($param: SystemParam),*> IntoSystem<($($param,)*), FuncSystem<Out>> for Func
where
Func:
FnMut($($param),*) -> Out +
FnMut($(<<$param as SystemParam>::Fetch as FetchSystemParam>::Item),*) -> Out +
Send + Sync + 'static, Out: 'static
{
#[allow(unused_variables)]
#[allow(unused_unsafe)]
#[allow(non_snake_case)]
fn system(mut self) -> FuncSystem<Out> {
FuncSystem {
state: SystemState {
name: std::any::type_name::<Self>().into(),
archetype_component_access: TypeAccess::default(),
resource_access: TypeAccess::default(),
local_resource_access: TypeAccess::default(),
id: SystemId::new(),
commands: Default::default(),
arc_commands: Default::default(),
current_query_index: Default::default(),
query_archetype_component_accesses: Vec::new(),
query_accesses: Vec::new(),
query_type_names: Vec::new(),
},
func: Box::new(move |state, world, resources| {
state.reset_indices();
// let mut input = Some(input);
unsafe {
if let Some(($($param,)*)) = <<($($param,)*) as SystemParam>::Fetch as FetchSystemParam>::get_param(state, world, resources) {
Some(self($($param),*))
} else {
None
}
}
}),
thread_local_func: Box::new(|state, world, resources| {
// SAFE: this is called with unique access to SystemState
unsafe {
(&mut *state.commands.get()).apply(world, resources);
}
if let Some(ref commands) = state.arc_commands {
let mut commands = commands.lock();
commands.apply(world, resources);
}
}),
init_func: Box::new(|state, world, resources| {
<<($($param,)*) as SystemParam>::Fetch as FetchSystemParam>::init(state, world, resources)
}),
}
}
}
impl<Func, Input, Out, $($param: SystemParam),*> IntoSystem<(Input, $($param,)*), InputFuncSystem<Input, Out>> for Func
where
Func:
FnMut(In<Input>, $($param),*) -> Out +
FnMut(In<Input>, $(<<$param as SystemParam>::Fetch as FetchSystemParam>::Item),*) -> Out +
Send + Sync + 'static, Input: 'static, Out: 'static
{
#[allow(unused_variables)]
#[allow(unused_unsafe)]
#[allow(non_snake_case)]
fn system(mut self) -> InputFuncSystem<Input, Out> {
InputFuncSystem {
state: SystemState {
name: std::any::type_name::<Self>().into(),
archetype_component_access: TypeAccess::default(),
resource_access: TypeAccess::default(),
local_resource_access: TypeAccess::default(),
id: SystemId::new(),
commands: Default::default(),
arc_commands: Default::default(),
current_query_index: Default::default(),
query_archetype_component_accesses: Vec::new(),
query_accesses: Vec::new(),
query_type_names: Vec::new(),
},
func: Box::new(move |input, state, world, resources| {
state.reset_indices();
// let mut input = Some(input);
unsafe {
if let Some(($($param,)*)) = <<($($param,)*) as SystemParam>::Fetch as FetchSystemParam>::get_param(state, world, resources) {
Some(self(In(input), $($param),*))
} else {
None
}
}
}),
thread_local_func: Box::new(|state, world, resources| {
// SAFE: this is called with unique access to SystemState
unsafe {
(&mut *state.commands.get()).apply(world, resources);
}
if let Some(ref commands) = state.arc_commands {
let mut commands = commands.lock();
commands.apply(world, resources);
}
}),
init_func: Box::new(|state, world, resources| {
<<($($param,)*) as SystemParam>::Fetch as FetchSystemParam>::init(state, world, resources)
}),
}
}
}
};
}
impl_into_system!();
impl_into_system!(A);
impl_into_system!(A, B);
impl_into_system!(A, B, C);
impl_into_system!(A, B, C, D);
impl_into_system!(A, B, C, D, E);
impl_into_system!(A, B, C, D, E, F);
impl_into_system!(A, B, C, D, E, F, G);
impl_into_system!(A, B, C, D, E, F, G, H);
impl_into_system!(A, B, C, D, E, F, G, H, I);
impl_into_system!(A, B, C, D, E, F, G, H, I, J);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K, L);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K, L, M);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K, L, M, N);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O);
impl_into_system!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);
#[cfg(test)]
mod tests {
use super::IntoSystem;
use crate::{
clear_trackers_system,
resource::{Res, ResMut, Resources},
schedule::Schedule,
ChangedRes, Entity, Local, Or, Query, QuerySet, System, SystemStage, With, World,
};
#[derive(Debug, Eq, PartialEq, Default)]
struct A;
struct B;
struct C;
struct D;
#[test]
fn query_system_gets() {
fn query_system(
mut ran: ResMut<bool>,
entity_query: Query<Entity, With<A>>,
b_query: Query<&B>,
a_c_query: Query<(&A, &C)>,
d_query: Query<&D>,
) {
let entities = entity_query.iter().collect::<Vec<Entity>>();
assert!(
b_query.get_component::<B>(entities[0]).is_err(),
"entity 0 should not have B"
);
assert!(
b_query.get_component::<B>(entities[1]).is_ok(),
"entity 1 should have B"
);
assert!(
b_query.get_component::<A>(entities[1]).is_err(),
"entity 1 should have A, but b_query shouldn't have access to it"
);
assert!(
b_query.get_component::<D>(entities[3]).is_err(),
"entity 3 should have D, but it shouldn't be accessible from b_query"
);
assert!(
b_query.get_component::<C>(entities[2]).is_err(),
"entity 2 has C, but it shouldn't be accessible from b_query"
);
assert!(
a_c_query.get_component::<C>(entities[2]).is_ok(),
"entity 2 has C, and it should be accessible from a_c_query"
);
assert!(
a_c_query.get_component::<D>(entities[3]).is_err(),
"entity 3 should have D, but it shouldn't be accessible from b_query"
);
assert!(
d_query.get_component::<D>(entities[3]).is_ok(),
"entity 3 should have D"
);
*ran = true;
}
let mut world = World::default();
let mut resources = Resources::default();
resources.insert(false);
world.spawn((A,));
world.spawn((A, B));
world.spawn((A, C));
world.spawn((A, D));
run_system(&mut world, &mut resources, query_system.system());
assert!(*resources.get::<bool>().unwrap(), "system ran");
}
#[test]
fn or_query_set_system() {
// Regression test for issue #762
use crate::{Added, Changed, Mutated, Or};
fn query_system(
mut ran: ResMut<bool>,
set: QuerySet<(
Query<(), Or<(Changed<A>, Changed<B>)>>,
Query<(), Or<(Added<A>, Added<B>)>>,
Query<(), Or<(Mutated<A>, Mutated<B>)>>,
)>,
) {
let changed = set.q0().iter().count();
let added = set.q1().iter().count();
let mutated = set.q2().iter().count();
assert_eq!(changed, 1);
assert_eq!(added, 1);
assert_eq!(mutated, 0);
*ran = true;
}
let mut world = World::default();
let mut resources = Resources::default();
resources.insert(false);
world.spawn((A, B));
run_system(&mut world, &mut resources, query_system.system());
assert!(*resources.get::<bool>().unwrap(), "system ran");
}
#[test]
fn changed_resource_system() {
fn incr_e_on_flip(_run_on_flip: ChangedRes<bool>, mut query: Query<&mut i32>) {
for mut i in query.iter_mut() {
*i += 1;
}
}
let mut world = World::default();
let mut resources = Resources::default();
resources.insert(false);
let ent = world.spawn((0,));
let mut schedule = Schedule::default();
let mut update = SystemStage::parallel();
update.add_system(incr_e_on_flip.system());
schedule.add_stage("update", update);
schedule.add_stage(
"clear_trackers",
SystemStage::single(clear_trackers_system.system()),
);
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 1);
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 1);
*resources.get_mut::<bool>().unwrap() = true;
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 2);
}
#[test]
fn changed_resource_or_system() {
fn incr_e_on_flip(
_or: Or<(Option<ChangedRes<bool>>, Option<ChangedRes<i32>>)>,
mut query: Query<&mut i32>,
) {
for mut i in query.iter_mut() {
*i += 1;
}
}
let mut world = World::default();
let mut resources = Resources::default();
resources.insert(false);
resources.insert::<i32>(10);
let ent = world.spawn((0,));
let mut schedule = Schedule::default();
let mut update = SystemStage::parallel();
update.add_system(incr_e_on_flip.system());
schedule.add_stage("update", update);
schedule.add_stage(
"clear_trackers",
SystemStage::single(clear_trackers_system.system()),
);
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 1);
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 1);
*resources.get_mut::<bool>().unwrap() = true;
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 2);
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 2);
*resources.get_mut::<i32>().unwrap() = 20;
schedule.initialize_and_run(&mut world, &mut resources);
assert_eq!(*(world.get::<i32>(ent).unwrap()), 3);
}
#[test]
#[should_panic]
fn conflicting_query_mut_system() {
fn sys(_q1: Query<&mut A>, _q2: Query<&mut A>) {}
let mut world = World::default();
let mut resources = Resources::default();
world.spawn((A,));
run_system(&mut world, &mut resources, sys.system());
}
#[test]
#[should_panic]
fn conflicting_query_immut_system() {
fn sys(_q1: Query<&A>, _q2: Query<&mut A>) {}
let mut world = World::default();
let mut resources = Resources::default();
world.spawn((A,));
run_system(&mut world, &mut resources, sys.system());
}
#[test]
fn query_set_system() {
fn sys(_set: QuerySet<(Query<&mut A>, Query<&B>)>) {}
let mut world = World::default();
let mut resources = Resources::default();
world.spawn((A,));
run_system(&mut world, &mut resources, sys.system());
}
#[test]
#[should_panic]
fn conflicting_query_with_query_set_system() {
fn sys(_query: Query<&mut A>, _set: QuerySet<(Query<&mut A>, Query<&B>)>) {}
let mut world = World::default();
let mut resources = Resources::default();
world.spawn((A,));
run_system(&mut world, &mut resources, sys.system());
}
#[test]
#[should_panic]
fn conflicting_query_sets_system() {
fn sys(_set_1: QuerySet<(Query<&mut A>,)>, _set_2: QuerySet<(Query<&mut A>, Query<&B>)>) {}
let mut world = World::default();
let mut resources = Resources::default();
world.spawn((A,));
run_system(&mut world, &mut resources, sys.system());
}
fn run_system<S: System<In = (), Out = ()>>(
world: &mut World,
resources: &mut Resources,
system: S,
) {
let mut schedule = Schedule::default();
let mut update = SystemStage::parallel();
update.add_system(system);
schedule.add_stage("update", update);
schedule.initialize_and_run(world, resources);
}
#[derive(Default)]
struct BufferRes {
_buffer: Vec<u8>,
}
fn test_for_conflicting_resources<S: System<In = (), Out = ()>>(sys: S) {
let mut world = World::default();
let mut resources = Resources::default();
resources.insert(BufferRes::default());
resources.insert(A);
resources.insert(B);
run_system(&mut world, &mut resources, sys.system());
}
#[test]
#[should_panic]
fn conflicting_system_resources() {
fn sys(_: ResMut<BufferRes>, _: Res<BufferRes>) {}
test_for_conflicting_resources(sys.system())
}
#[test]
#[should_panic]
fn conflicting_system_resources_reverse_order() {
fn sys(_: Res<BufferRes>, _: ResMut<BufferRes>) {}
test_for_conflicting_resources(sys.system())
}
#[test]
#[should_panic]
fn conflicting_system_resources_multiple_mutable() {
fn sys(_: ResMut<BufferRes>, _: ResMut<BufferRes>) {}
test_for_conflicting_resources(sys.system())
}
#[test]
#[should_panic]
fn conflicting_changed_and_mutable_resource() {
// A tempting pattern, but unsound if allowed.
fn sys(_: ResMut<BufferRes>, _: ChangedRes<BufferRes>) {}
test_for_conflicting_resources(sys.system())
}
#[test]
#[should_panic]
fn conflicting_system_local_resources() {
fn sys(_: Local<BufferRes>, _: Local<BufferRes>) {}
test_for_conflicting_resources(sys.system())
}
#[test]
fn nonconflicting_system_resources() {
fn sys(_: Local<BufferRes>, _: ResMut<BufferRes>, _: Local<A>, _: ResMut<A>) {}
test_for_conflicting_resources(sys.system())
}
}
| 35.673406 | 154 | 0.538277 |
9b0252d8ecce55b00e7c4fd6533ae022c6259f3e | 41,799 | use dox::{mem, Option};
pub type rlim_t = ::uintptr_t;
pub type sa_family_t = u8;
pub type pthread_key_t = ::c_int;
pub type nfds_t = ::c_long;
pub type tcflag_t = ::c_uint;
pub type speed_t = ::c_uint;
pub type c_char = i8;
pub type clock_t = i32;
pub type clockid_t = i32;
pub type suseconds_t = i32;
pub type wchar_t = i32;
pub type off_t = i64;
pub type ino_t = i64;
pub type blkcnt_t = i64;
pub type blksize_t = i32;
pub type dev_t = i32;
pub type mode_t = u32;
pub type nlink_t = i32;
pub type useconds_t = u32;
pub type socklen_t = u32;
pub type pthread_t = ::uintptr_t;
pub type pthread_mutexattr_t = ::uintptr_t;
pub type pthread_rwlockattr_t = ::uintptr_t;
pub type sigset_t = u64;
pub type fsblkcnt_t = i64;
pub type fsfilcnt_t = i64;
pub type pthread_attr_t = *mut ::c_void;
pub type nl_item = ::c_int;
pub type id_t = i32;
pub type idtype_t = ::c_uint;
pub enum timezone {}
s! {
pub struct sockaddr {
pub sa_len: u8,
pub sa_family: sa_family_t,
pub sa_data: [::c_char; 30],
}
pub struct sockaddr_in {
pub sin_len: u8,
pub sin_family: sa_family_t,
pub sin_port: ::in_port_t,
pub sin_addr: ::in_addr,
pub sin_zero: [u8; 24],
}
pub struct sockaddr_in6 {
pub sin6_len: u8,
pub sin6_family: sa_family_t,
pub sin6_port: ::in_port_t,
pub sin6_flowinfo: u32,
pub sin6_addr: ::in6_addr,
pub sin6_scope_id: u32,
}
pub struct sockaddr_un {
pub sun_len: u8,
pub sun_family: sa_family_t,
pub sun_path: [::c_char; 126]
}
pub struct sockaddr_storage {
pub ss_len: u8,
pub ss_family: sa_family_t,
__ss_pad1: [u8; 6],
__ss_pad2: u64,
__ss_pad3: [u8; 112],
}
pub struct addrinfo {
pub ai_flags: ::c_int,
pub ai_family: ::c_int,
pub ai_socktype: ::c_int,
pub ai_protocol: ::c_int,
pub ai_addrlen: socklen_t,
pub ai_canonname: *mut c_char,
pub ai_addr: *mut ::sockaddr,
pub ai_next: *mut addrinfo,
}
pub struct fd_set {
fds_bits: [c_ulong; FD_SETSIZE / ULONG_SIZE],
}
pub struct tm {
pub tm_sec: ::c_int,
pub tm_min: ::c_int,
pub tm_hour: ::c_int,
pub tm_mday: ::c_int,
pub tm_mon: ::c_int,
pub tm_year: ::c_int,
pub tm_wday: ::c_int,
pub tm_yday: ::c_int,
pub tm_isdst: ::c_int,
pub tm_gmtoff: ::c_long,
pub tm_zone: *const ::c_char,
}
pub struct utsname {
pub sysname: [::c_char; 32],
pub nodename: [::c_char; 32],
pub release: [::c_char; 32],
pub version: [::c_char; 32],
pub machine: [::c_char; 32],
}
pub struct lconv {
pub decimal_point: *mut ::c_char,
pub thousands_sep: *mut ::c_char,
pub grouping: *mut ::c_char,
pub int_curr_symbol: *mut ::c_char,
pub currency_symbol: *mut ::c_char,
pub mon_decimal_point: *mut ::c_char,
pub mon_thousands_sep: *mut ::c_char,
pub mon_grouping: *mut ::c_char,
pub positive_sign: *mut ::c_char,
pub negative_sign: *mut ::c_char,
pub int_frac_digits: ::c_char,
pub frac_digits: ::c_char,
pub p_cs_precedes: ::c_char,
pub p_sep_by_space: ::c_char,
pub n_cs_precedes: ::c_char,
pub n_sep_by_space: ::c_char,
pub p_sign_posn: ::c_char,
pub n_sign_posn: ::c_char,
pub int_p_cs_precedes: ::c_char,
pub int_p_sep_by_space: ::c_char,
pub int_n_cs_precedes: ::c_char,
pub int_n_sep_by_space: ::c_char,
pub int_p_sign_posn: ::c_char,
pub int_n_sign_posn: ::c_char,
}
pub struct msghdr {
pub msg_name: *mut ::c_void,
pub msg_namelen: ::socklen_t,
pub msg_iov: *mut ::iovec,
pub msg_iovlen: ::c_int,
pub msg_control: *mut ::c_void,
pub msg_controllen: ::socklen_t,
pub msg_flags: ::c_int,
}
pub struct cmsghdr {
pub cmsg_len: ::size_t,
pub cmsg_level: ::c_int,
pub cmsg_type: ::c_int,
}
pub struct Dl_info {
pub dli_fname: *const ::c_char,
pub dli_fbase: *mut ::c_void,
pub dli_sname: *const ::c_char,
pub dli_saddr: *mut ::c_void,
}
pub struct termios {
pub c_iflag: ::tcflag_t,
pub c_oflag: ::tcflag_t,
pub c_cflag: ::tcflag_t,
pub c_lflag: ::tcflag_t,
pub c_line: ::c_char,
pub c_ispeed: ::speed_t,
pub c_ospeed: ::speed_t,
pub c_cc: [::cc_t; ::NCCS],
}
pub struct flock {
pub l_type: ::c_short,
pub l_whence: ::c_short,
pub l_start: ::off_t,
pub l_len: ::off_t,
pub l_pid: ::pid_t,
}
pub struct stat {
pub st_dev: dev_t,
pub st_ino: ino_t,
pub st_mode: mode_t,
pub st_nlink: nlink_t,
pub st_uid: ::uid_t,
pub st_gid: ::gid_t,
pub st_size: off_t,
pub st_rdev: dev_t,
pub st_blksize: blksize_t,
pub st_atime: time_t,
pub st_atime_nsec: c_long,
pub st_mtime: time_t,
pub st_mtime_nsec: c_long,
pub st_ctime: time_t,
pub st_ctime_nsec: c_long,
pub st_crtime: time_t,
pub st_crtime_nsec: c_long,
pub st_type: u32,
pub st_blocks: blkcnt_t,
}
pub struct dirent {
pub d_dev: dev_t,
pub d_pdev: dev_t,
pub d_ino: ino_t,
pub d_pino: i64,
pub d_reclen: ::c_ushort,
pub d_name: [::c_char; 1024], // Max length is _POSIX_PATH_MAX
}
pub struct glob_t {
pub gl_pathc: ::size_t,
__unused1: ::size_t,
pub gl_offs: ::size_t,
__unused2: ::size_t,
pub gl_pathv: *mut *mut c_char,
__unused3: *mut ::c_void,
__unused4: *mut ::c_void,
__unused5: *mut ::c_void,
__unused6: *mut ::c_void,
__unused7: *mut ::c_void,
__unused8: *mut ::c_void,
}
pub struct pthread_mutex_t {
flags: u32,
lock: i32,
unused: i32,
owner: i32,
owner_count: i32,
}
pub struct pthread_cond_t {
flags: u32,
unused: i32,
mutex: *mut ::c_void,
waiter_count: i32,
lock: i32,
}
pub struct pthread_rwlock_t {
flags: u32,
owner: i32,
lock_sem: i32, // this is actually a union
lock_count: i32,
reader_count: i32,
writer_count: i32,
waiters: [*mut ::c_void; 2],
}
pub struct passwd {
pub pw_name: *mut ::c_char,
pub pw_passwd: *mut ::c_char,
pub pw_uid: ::uid_t,
pub pw_gid: ::gid_t,
pub pw_dir: *mut ::c_char,
pub pw_shell: *mut ::c_char,
pub pw_gecos: *mut ::c_char,
}
pub struct statvfs {
pub f_bsize: ::c_ulong,
pub f_frsize: ::c_ulong,
pub f_blocks: ::fsblkcnt_t,
pub f_bfree: ::fsblkcnt_t,
pub f_bavail: ::fsblkcnt_t,
pub f_files: ::fsfilcnt_t,
pub f_ffree: ::fsfilcnt_t,
pub f_favail: ::fsfilcnt_t,
pub f_fsid: ::c_ulong,
pub f_flag: ::c_ulong,
pub f_namemax: ::c_ulong,
}
pub struct stack_t {
pub ss_sp: *mut ::c_void,
pub ss_size: ::size_t,
pub ss_flags: ::c_int,
}
pub struct siginfo_t {
pub si_signo: ::c_int,
pub si_code: ::c_int,
pub si_errno: ::c_int,
pub si_pid: ::pid_t,
pub si_uid: ::uid_t,
pub si_addr: *mut ::c_void,
pub si_status: ::c_int,
pub si_band: c_long,
pub sigval: *mut ::c_void,
}
pub struct sigaction {
pub sa_sigaction: ::sighandler_t,
pub sa_mask: ::sigset_t,
pub sa_flags: ::c_int,
sa_userdata: *mut ::c_void,
}
pub struct sigevent {
pub sigev_notify: ::c_int,
pub sigev_signo: ::c_int,
pub sigev_value: ::sigval,
__unused1: *mut ::c_void, // actually a function pointer
pub sigev_notify_attributes: *mut ::pthread_attr_t,
}
pub struct sem_t {
pub se_type: i32,
pub se_named_id: i32, // this is actually a union
pub se_unnamed: i32,
pub se_padding: [i32; 4],
}
pub struct pthread_condattr_t {
pub process_shared: bool,
pub clock_id: i32,
}
}
// intentionally not public, only used for fd_set
cfg_if! {
if #[cfg(target_pointer_width = "32")] {
const ULONG_SIZE: usize = 32;
} else if #[cfg(target_pointer_width = "64")] {
const ULONG_SIZE: usize = 64;
} else {
// Unknown target_pointer_width
}
}
pub const EXIT_FAILURE: ::c_int = 1;
pub const EXIT_SUCCESS: ::c_int = 0;
pub const RAND_MAX: ::c_int = 2147483647;
pub const EOF: ::c_int = -1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
pub const _IOFBF: ::c_int = 0;
pub const _IONBF: ::c_int = 2;
pub const _IOLBF: ::c_int = 1;
pub const F_DUPFD: ::c_int = 0x0001;
pub const F_GETFD: ::c_int = 0x0002;
pub const F_SETFD: ::c_int = 0x0004;
pub const F_GETFL: ::c_int = 0x0008;
pub const F_SETFL: ::c_int = 0x0010;
pub const F_GETLK: ::c_int = 0x0020;
pub const F_SETLK: ::c_int = 0x0080;
pub const F_SETLKW: ::c_int = 0x0100;
pub const F_DUPFD_CLOEXEC: ::c_int = 0x0200;
pub const AT_FDCWD: ::c_int = -1;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x01;
pub const AT_SYMLINK_FOLLOW: ::c_int = 0x02;
pub const AT_REMOVEDIR: ::c_int = 0x04;
pub const AT_EACCESS: ::c_int = 0x08;
pub const POLLIN: ::c_short = 0x0001;
pub const POLLOUT: ::c_short = 0x0002;
pub const POLLRDNORM: ::c_short = POLLIN;
pub const POLLWRNORM: ::c_short = POLLOUT;
pub const POLLRDBAND: ::c_short = 0x0008;
pub const POLLWRBAND: ::c_short = 0x0010;
pub const POLLPRI: ::c_short = 0x0020;
pub const POLLERR: ::c_short = 0x0004;
pub const POLLHUP: ::c_short = 0x0080;
pub const POLLNVAL: ::c_short = 0x1000;
pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
pub const CLOCK_REALTIME: ::c_int = -1;
pub const CLOCK_MONOTONIC: ::c_int = 0;
pub const RLIMIT_CORE: ::c_int = 0;
pub const RLIMIT_CPU: ::c_int = 1;
pub const RLIMIT_DATA: ::c_int = 2;
pub const RLIMIT_FSIZE: ::c_int = 3;
pub const RLIMIT_NOFILE: ::c_int = 4;
pub const RLIMIT_AS: ::c_int = 6;
// Haiku specific
pub const RLIMIT_NOVMON: ::c_int = 7;
pub const RLIMIT_NLIMITS: ::c_int = 8;
pub const RUSAGE_SELF: ::c_int = 0;
pub const RTLD_LAXY: ::c_int = 0;
pub const NCCS: usize = 11;
pub const O_RDONLY: ::c_int = 0x0000;
pub const O_WRONLY: ::c_int = 0x0001;
pub const O_RDWR: ::c_int = 0x0002;
pub const O_ACCMODE: ::c_int = 0x0003;
pub const O_EXCL: ::c_int = 0x0100;
pub const O_CREAT: ::c_int = 0x0200;
pub const O_TRUNC: ::c_int = 0x0400;
pub const O_NOCTTY: ::c_int = 0x1000;
pub const O_NOTRAVERSE: ::c_int = 0x2000;
pub const O_CLOEXEC: ::c_int = 0x00000040;
pub const O_NONBLOCK: ::c_int = 0x00000080;
pub const O_APPEND: ::c_int = 0x00000800;
pub const O_SYNC: ::c_int = 0x00010000;
pub const O_RSYNC: ::c_int = 0x00020000;
pub const O_DSYNC: ::c_int = 0x00040000;
pub const O_NOFOLLOW: ::c_int = 0x00080000;
pub const O_NOCACHE: ::c_int = 0x00100000;
pub const O_DIRECTORY: ::c_int = 0x00200000;
pub const S_IFIFO: ::mode_t = 61440;
pub const S_IFCHR: ::mode_t = 49152;
pub const S_IFBLK: ::mode_t = 24576;
pub const S_IFDIR: ::mode_t = 16384;
pub const S_IFREG: ::mode_t = 32768;
pub const S_IFLNK: ::mode_t = 40960;
pub const S_IFSOCK: ::mode_t = 49152;
pub const S_IFMT: ::mode_t = 61440;
pub const S_IRWXU: ::mode_t = 448;
pub const S_IXUSR: ::mode_t = 64;
pub const S_IWUSR: ::mode_t = 128;
pub const S_IRUSR: ::mode_t = 256;
pub const S_IRWXG: ::mode_t = 70;
pub const S_IXGRP: ::mode_t = 10;
pub const S_IWGRP: ::mode_t = 20;
pub const S_IRGRP: ::mode_t = 40;
pub const S_IRWXO: ::mode_t = 7;
pub const S_IXOTH: ::mode_t = 1;
pub const S_IWOTH: ::mode_t = 2;
pub const S_IROTH: ::mode_t = 4;
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGCHLD: ::c_int = 5;
pub const SIGABRT: ::c_int = 6;
pub const SIGPIPE: ::c_int = 7;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGSTOP: ::c_int = 10;
pub const SIGSEGV: ::c_int = 11;
pub const SIGCONT: ::c_int = 12;
pub const SIGTSTP: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
pub const SIGTTIN: ::c_int = 16;
pub const SIGTTOU: ::c_int = 17;
pub const SIGUSR1: ::c_int = 18;
pub const SIGUSR2: ::c_int = 19;
pub const SIGWINCH: ::c_int = 20;
pub const SIGKILLTHR: ::c_int = 21;
pub const SIGTRAP: ::c_int = 22;
pub const SIGPOLL: ::c_int = 23;
pub const SIGPROF: ::c_int = 24;
pub const SIGSYS: ::c_int = 25;
pub const SIGURG: ::c_int = 26;
pub const SIGVTALRM: ::c_int = 27;
pub const SIGXCPU: ::c_int = 28;
pub const SIGXFSZ: ::c_int = 29;
pub const SIGBUS: ::c_int = 30;
pub const SIG_BLOCK: ::c_int = 1;
pub const SIG_UNBLOCK: ::c_int = 2;
pub const SIG_SETMASK: ::c_int = 3;
pub const SIGEV_NONE: ::c_int = 0;
pub const SIGEV_SIGNAL: ::c_int = 1;
pub const SIGEV_THREAD: ::c_int = 2;
pub const EAI_AGAIN: ::c_int = 2;
pub const EAI_BADFLAGS: ::c_int = 3;
pub const EAI_FAIL: ::c_int = 4;
pub const EAI_FAMILY: ::c_int = 5;
pub const EAI_MEMORY: ::c_int = 6;
pub const EAI_NODATA: ::c_int = 7;
pub const EAI_NONAME: ::c_int = 8;
pub const EAI_SERVICE: ::c_int = 9;
pub const EAI_SOCKTYPE: ::c_int = 10;
pub const EAI_SYSTEM: ::c_int = 11;
pub const EAI_OVERFLOW: ::c_int = 14;
pub const PROT_NONE: ::c_int = 0;
pub const PROT_READ: ::c_int = 1;
pub const PROT_WRITE: ::c_int = 2;
pub const PROT_EXEC: ::c_int = 4;
pub const LC_ALL: ::c_int = 0;
pub const LC_COLLATE: ::c_int = 1;
pub const LC_CTYPE: ::c_int = 2;
pub const LC_MONETARY: ::c_int = 3;
pub const LC_NUMERIC: ::c_int = 4;
pub const LC_TIME: ::c_int = 5;
pub const LC_MESSAGES: ::c_int = 6;
// TODO: Haiku does not have MAP_FILE, but libstd/os.rs requires it
pub const MAP_FILE: ::c_int = 0x00;
pub const MAP_SHARED: ::c_int = 0x01;
pub const MAP_PRIVATE: ::c_int = 0x02;
pub const MAP_FIXED: ::c_int = 0x04;
pub const MAP_ANONYMOUS: ::c_int = 0x08;
pub const MAP_ANON: ::c_int = MAP_ANONYMOUS;
pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void;
pub const MS_ASYNC: ::c_int = 0x01;
pub const MS_INVALIDATE: ::c_int = 0x04;
pub const MS_SYNC: ::c_int = 0x02;
pub const E2BIG : ::c_int = -2147454975;
pub const ECHILD : ::c_int = -2147454974;
pub const EDEADLK : ::c_int = -2147454973;
pub const EFBIG : ::c_int = -2147454972;
pub const EMLINK : ::c_int = -2147454971;
pub const ENFILE : ::c_int = -2147454970;
pub const ENODEV : ::c_int = -2147454969;
pub const ENOLCK : ::c_int = -2147454968;
pub const ENOSYS : ::c_int = -2147454967;
pub const ENOTTY : ::c_int = -2147454966;
pub const ENXIO : ::c_int = -2147454965;
pub const ESPIPE : ::c_int = -2147454964;
pub const ESRCH : ::c_int = -2147454963;
pub const EFPOS : ::c_int = -2147457962;
pub const ESIGPARM : ::c_int = -2147457961;
pub const EDOM : ::c_int = -2147454960;
pub const ERANGE : ::c_int = -2147454959;
pub const EPROTOTYPE : ::c_int = -2147454958;
pub const EPROTONOSUPPORT : ::c_int = -2147454957;
pub const EPFNOSUPPORT : ::c_int = -2147454956;
pub const EAFNOSUPPORT : ::c_int = -2147454955;
pub const EADDRINUSE : ::c_int = -2147454954;
pub const EADDRNOTAVAIL : ::c_int = -2147454953;
pub const ENETDOWN : ::c_int = -2147454952;
pub const ENETUNREACH : ::c_int = -2147454951;
pub const ENETRESET : ::c_int = -2147454950;
pub const ECONNABORTED : ::c_int = -2147454949;
pub const ECONNRESET : ::c_int = -2147454948;
pub const EISCONN : ::c_int = -2147454947;
pub const ENOTCONN : ::c_int = -2147454946;
pub const ESHUTDOWN : ::c_int = -2147454945;
pub const ECONNREFUSED : ::c_int = -2147454944;
pub const EHOSTUNREACH : ::c_int = -2147454943;
pub const ENOPROTOOPT : ::c_int = -2147454942;
pub const ENOBUFS : ::c_int = -2147454941;
pub const EINPROGRESS : ::c_int = -2147454940;
pub const EALREADY : ::c_int = -2147454939;
pub const EILSEQ : ::c_int = -2147454938;
pub const ENOMSG : ::c_int = -2147454937;
pub const ESTALE : ::c_int = -2147454936;
pub const EOVERFLOW : ::c_int = -2147454935;
pub const EMSGSIZE : ::c_int = -2147454934;
pub const EOPNOTSUPP : ::c_int = -2147454933;
pub const ENOTSOCK : ::c_int = -2147454932;
pub const EHOSTDOWN : ::c_int = -2147454931;
pub const EBADMSG : ::c_int = -2147454930;
pub const ECANCELED : ::c_int = -2147454929;
pub const EDESTADDRREQ : ::c_int = -2147454928;
pub const EDQUOT : ::c_int = -2147454927;
pub const EIDRM : ::c_int = -2147454926;
pub const EMULTIHOP : ::c_int = -2147454925;
pub const ENODATA : ::c_int = -2147454924;
pub const ENOLINK : ::c_int = -2147454923;
pub const ENOSR : ::c_int = -2147454922;
pub const ENOSTR : ::c_int = -2147454921;
pub const ENOTSUP : ::c_int = -2147454920;
pub const EPROTO : ::c_int = -2147454919;
pub const ETIME : ::c_int = -2147454918;
pub const ETXTBSY : ::c_int = -2147454917;
pub const ENOATTR : ::c_int = -2147454916;
// INT_MIN
pub const ENOMEM : ::c_int = -2147454976;
// POSIX errors that can be mapped to BeOS error codes
pub const EACCES : ::c_int = -2147483646;
pub const EINTR : ::c_int = -2147483638;
pub const EIO : ::c_int = -2147483647;
pub const EBUSY : ::c_int = -2147483634;
pub const EFAULT : ::c_int = -2147478783;
pub const ETIMEDOUT : ::c_int = -2147483639;
pub const EAGAIN : ::c_int = -2147483637;
pub const EWOULDBLOCK : ::c_int = -2147483637;
pub const EBADF : ::c_int = -2147459072;
pub const EEXIST : ::c_int = -2147459070;
pub const EINVAL : ::c_int = -2147483643;
pub const ENAMETOOLONG : ::c_int = -2147459068;
pub const ENOENT : ::c_int = -2147459069;
pub const EPERM : ::c_int = -2147483633;
pub const ENOTDIR : ::c_int = -2147459067;
pub const EISDIR : ::c_int = -2147459063;
pub const ENOTEMPTY : ::c_int = -2147459066;
pub const ENOSPC : ::c_int = -2147459065;
pub const EROFS : ::c_int = -2147459064;
pub const EMFILE : ::c_int = -2147459062;
pub const EXDEV : ::c_int = -2147459061;
pub const ELOOP : ::c_int = -2147459060;
pub const ENOEXEC : ::c_int = -2147478782;
pub const EPIPE : ::c_int = -2147459059;
pub const IPPROTO_RAW: ::c_int = 255;
// These are prefixed with POSIX_ on Haiku
pub const MADV_NORMAL: ::c_int = 1;
pub const MADV_SEQUENTIAL: ::c_int = 2;
pub const MADV_RANDOM: ::c_int = 3;
pub const MADV_WILLNEED: ::c_int = 4;
pub const MADV_DONTNEED: ::c_int = 5;
// https://github.com/haiku/haiku/blob/master/headers/posix/net/if.h#L80
pub const IFF_UP: ::c_int = 0x0001;
pub const IFF_BROADCAST: ::c_int = 0x0002; // valid broadcast address
pub const IFF_LOOPBACK: ::c_int = 0x0008;
pub const IFF_POINTOPOINT: ::c_int = 0x0010; // point-to-point link
pub const IFF_NOARP: ::c_int = 0x0040; // no address resolution
pub const IFF_AUTOUP: ::c_int = 0x0080; // auto dial
pub const IFF_PROMISC: ::c_int = 0x0100; // receive all packets
pub const IFF_ALLMULTI: ::c_int = 0x0200; // receive all multicast packets
pub const IFF_SIMPLEX: ::c_int = 0x0800; // doesn't receive own transmissions
pub const IFF_LINK: ::c_int = 0x1000; // has link
pub const IFF_AUTO_CONFIGURED: ::c_int = 0x2000;
pub const IFF_CONFIGURING: ::c_int = 0x4000;
pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast
pub const AF_UNSEC: ::c_int = 0;
pub const AF_INET: ::c_int = 1;
pub const AF_APPLETALK: ::c_int = 2;
pub const AF_ROUTE: ::c_int = 3;
pub const AF_LINK: ::c_int = 4;
pub const AF_INET6: ::c_int = 5;
pub const AF_DLI: ::c_int = 6;
pub const AF_IPX: ::c_int = 7;
pub const AF_NOTIFY: ::c_int = 8;
pub const AF_LOCAL: ::c_int = 9;
pub const AF_UNIX: ::c_int = AF_LOCAL;
pub const AF_BLUETOOTH: ::c_int = 10;
pub const AF_MAX: ::c_int = 11;
pub const IP_MULTICAST_TTL: ::c_int = 10;
pub const IP_MULTICAST_LOOP: ::c_int = 11;
pub const IP_TTL: ::c_int = 4;
pub const IP_HDRINCL: ::c_int = 2;
pub const IP_ADD_MEMBERSHIP: ::c_int = 12;
pub const IP_DROP_MEMBERSHIP: ::c_int = 13;
pub const TCP_NODELAY: ::c_int = 0x01;
pub const TCP_MAXSEG: ::c_int = 0x02;
pub const TCP_NOPUSH: ::c_int = 0x04;
pub const TCP_NOOPT: ::c_int = 0x08;
pub const IPV6_MULTICAST_LOOP: ::c_int = 26;
pub const IPV6_JOIN_GROUP: ::c_int = 28;
pub const IPV6_LEAVE_GROUP: ::c_int = 29;
pub const IPV6_V6ONLY: ::c_int = 30;
pub const MSG_OOB: ::c_int = 0x0001;
pub const MSG_PEEK: ::c_int = 0x0002;
pub const MSG_DONTROUTE: ::c_int = 0x0004;
pub const MSG_EOR: ::c_int = 0x0008;
pub const MSG_TRUNC: ::c_int = 0x0010;
pub const MSG_CTRUNC: ::c_int = 0x0020;
pub const MSG_WAITALL: ::c_int = 0x0040;
pub const MSG_DONTWAIT: ::c_int = 0x0080;
pub const MSG_BCAST: ::c_int = 0x0100;
pub const MSG_MCAST: ::c_int = 0x0200;
pub const MSG_EOF: ::c_int = 0x0400;
pub const MSG_NOSIGNAL: ::c_int = 0x0800;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const LOCK_SH: ::c_int = 0x01;
pub const LOCK_EX: ::c_int = 0x02;
pub const LOCK_NB: ::c_int = 0x04;
pub const LOCK_UN: ::c_int = 0x08;
pub const SIGSTKSZ: ::size_t = 16384;
pub const PATH_MAX: ::c_int = 1024;
pub const SA_NOCLDSTOP: ::c_int = 0x01;
pub const SA_NOCLDWAIT: ::c_int = 0x02;
pub const SA_RESETHAND: ::c_int = 0x04;
pub const SA_NODEFER: ::c_int = 0x08;
pub const SA_RESTART: ::c_int = 0x10;
pub const SA_ONSTACK: ::c_int = 0x20;
pub const SA_SIGINFO: ::c_int = 0x40;
pub const SA_NOMASK: ::c_int = SA_NODEFER;
pub const SA_STACK: ::c_int = SA_ONSTACK;
pub const SA_ONESHOT: ::c_int = SA_RESETHAND;
pub const FD_SETSIZE: usize = 1024;
pub const RTLD_NOW: ::c_int = 0x1;
pub const RTLD_DEFAULT: *mut ::c_void = 0isize as *mut ::c_void;
pub const BUFSIZ: ::c_uint = 8192;
pub const FILENAME_MAX: ::c_uint = 256;
pub const FOPEN_MAX: ::c_uint = 128;
pub const L_tmpnam: ::c_uint = 512;
pub const TMP_MAX: ::c_uint = 32768;
pub const _PC_CHOWN_RESTRICTED: ::c_int = 1;
pub const _PC_MAX_CANON: ::c_int = 2;
pub const _PC_MAX_INPUT: ::c_int = 3;
pub const _PC_NAME_MAX: ::c_int = 4;
pub const _PC_NO_TRUNC: ::c_int = 5;
pub const _PC_PATH_MAX: ::c_int = 6;
pub const _PC_PIPE_BUF: ::c_int = 7;
pub const _PC_VDISABLE: ::c_int = 8;
pub const _PC_LINK_MAX: ::c_int = 25;
pub const _PC_SYNC_IO: ::c_int = 26;
pub const _PC_ASYNC_IO: ::c_int = 27;
pub const _PC_PRIO_IO: ::c_int = 28;
pub const _PC_SOCK_MAXBUF: ::c_int = 29;
pub const _PC_FILESIZEBITS: ::c_int = 30;
pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 31;
pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 32;
pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 33;
pub const _PC_REC_XFER_ALIGN: ::c_int = 34;
pub const _PC_ALLOC_SIZE_MIN: ::c_int = 35;
pub const _PC_SYMLINK_MAX: ::c_int = 36;
pub const _PC_2_SYMLINKS: ::c_int = 37;
pub const _PC_XATTR_EXISTS: ::c_int = 38;
pub const _PC_XATTR_ENABLED: ::c_int = 39;
pub const FIONBIO: ::c_int = 0xbe000000;
pub const _SC_ARG_MAX : ::c_int = 15;
pub const _SC_CHILD_MAX : ::c_int = 16;
pub const _SC_CLK_TCK : ::c_int = 17;
pub const _SC_JOB_CONTROL : ::c_int = 18;
pub const _SC_NGROUPS_MAX : ::c_int = 19;
pub const _SC_OPEN_MAX : ::c_int = 20;
pub const _SC_SAVED_IDS : ::c_int = 21;
pub const _SC_STREAM_MAX : ::c_int = 22;
pub const _SC_TZNAME_MAX : ::c_int = 23;
pub const _SC_VERSION : ::c_int = 24;
pub const _SC_GETGR_R_SIZE_MAX : ::c_int = 25;
pub const _SC_GETPW_R_SIZE_MAX : ::c_int = 26;
pub const _SC_PAGESIZE : ::c_int = 27;
pub const _SC_PAGE_SIZE : ::c_int = 27;
pub const _SC_SEM_NSEMS_MAX : ::c_int = 28;
pub const _SC_SEM_VALUE_MAX : ::c_int = 29;
pub const _SC_SEMAPHORES : ::c_int = 30;
pub const _SC_THREADS : ::c_int = 31;
pub const _SC_IOV_MAX : ::c_int = 32;
pub const _SC_UIO_MAXIOV : ::c_int = 32;
pub const _SC_NPROCESSORS_CONF : ::c_int = 34;
pub const _SC_NPROCESSORS_ONLN : ::c_int = 35;
pub const _SC_ATEXIT_MAX : ::c_int = 37;
pub const _SC_PASS_MAX : ::c_int = 39;
pub const _SC_PHYS_PAGES : ::c_int = 40;
pub const _SC_AVPHYS_PAGES : ::c_int = 41;
pub const _SC_PIPE : ::c_int = 42;
pub const _SC_SELECT : ::c_int = 43;
pub const _SC_POLL : ::c_int = 44;
pub const _SC_MAPPED_FILES : ::c_int = 45;
pub const _SC_THREAD_PROCESS_SHARED : ::c_int = 46;
pub const _SC_THREAD_STACK_MIN : ::c_int = 47;
pub const _SC_THREAD_ATTR_STACKADDR : ::c_int = 48;
pub const _SC_THREAD_ATTR_STACKSIZE : ::c_int = 49;
pub const _SC_THREAD_PRIORITY_SCHEDULING : ::c_int = 50;
pub const _SC_REALTIME_SIGNALS : ::c_int = 51;
pub const _SC_MEMORY_PROTECTION : ::c_int = 52;
pub const _SC_SIGQUEUE_MAX : ::c_int = 53;
pub const _SC_RTSIG_MAX : ::c_int = 54;
pub const _SC_MONOTONIC_CLOCK : ::c_int = 55;
pub const _SC_DELAYTIMER_MAX : ::c_int = 56;
pub const _SC_TIMER_MAX : ::c_int = 57;
pub const _SC_TIMERS : ::c_int = 58;
pub const _SC_CPUTIME : ::c_int = 59;
pub const _SC_THREAD_CPUTIME : ::c_int = 60;
pub const PTHREAD_STACK_MIN: ::size_t = 8192;
pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
flags: 0,
lock: 0,
unused: -42,
owner: -1,
owner_count: 0,
};
pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
flags: 0,
unused: -42,
mutex: 0 as *mut _,
waiter_count: 0,
lock: 0,
};
pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t {
flags: 0,
owner: -1,
lock_sem: 0,
lock_count: 0,
reader_count: 0,
writer_count: 0,
waiters: [0 as *mut _; 2],
};
pub const PTHREAD_MUTEX_DEFAULT: ::c_int = 0;
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 1;
pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2;
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 3;
pub const FIOCLEX: c_ulong = 0; // TODO: does not exist on Haiku!
pub const RUSAGE_CHILDREN: ::c_int = -1;
pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SOCK_RAW: ::c_int = 3;
pub const SOCK_SEQPACKET: ::c_int = 5;
pub const SOL_SOCKET: ::c_int = -1;
pub const SO_ACCEPTCONN: ::c_int = 0x00000001;
pub const SO_BROADCAST: ::c_int = 0x00000002;
pub const SO_DEBUG: ::c_int = 0x00000004;
pub const SO_DONTROUTE: ::c_int = 0x00000008;
pub const SO_KEEPALIVE: ::c_int = 0x00000010;
pub const SO_OOBINLINE: ::c_int = 0x00000020;
pub const SO_REUSEADDR: ::c_int = 0x00000040;
pub const SO_REUSEPORT: ::c_int = 0x00000080;
pub const SO_USELOOPBACK: ::c_int = 0x00000100;
pub const SO_LINGER: ::c_int = 0x00000200;
pub const SO_SNDBUF: ::c_int = 0x40000001;
pub const SO_SNDLOWAT: ::c_int = 0x40000002;
pub const SO_SNDTIMEO: ::c_int = 0x40000003;
pub const SO_RCVBUF: ::c_int = 0x40000004;
pub const SO_RCVLOWAT: ::c_int = 0x40000005;
pub const SO_RCVTIMEO: ::c_int = 0x40000006;
pub const SO_ERROR: ::c_int = 0x40000007;
pub const SO_TYPE: ::c_int = 0x40000008;
pub const SO_NONBLOCK: ::c_int = 0x40000009;
pub const SO_BINDTODEVICE: ::c_int = 0x4000000a;
pub const SO_PEERCRED: ::c_int = 0x4000000b;
pub const SCM_RIGHTS: ::c_int = 0x01;
pub const NI_MAXHOST: ::size_t = 1025;
pub const WNOHANG: ::c_int = 0x01;
pub const WUNTRACED: ::c_int = 0x02;
pub const WCONTINUED: ::c_int = 0x04;
pub const WEXITED: ::c_int = 0x08;
pub const WSTOPPED: ::c_int = 0x10;
pub const WNOWAIT: ::c_int = 0x20;
pub const P_ALL: idtype_t = 0;
pub const P_PID: idtype_t = 1;
pub const P_PGID: idtype_t = 2;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VEOF: usize = 4;
pub const VEOL: usize = 5;
pub const VMIN: usize = 4;
pub const VTIME: usize = 5;
pub const VEOL2: usize = 6;
pub const VSWTCH: usize = 7;
pub const VSTART: usize = 8;
pub const VSTOP: usize = 9;
pub const VSUSP: usize = 10;
pub const IGNBRK: ::tcflag_t = 0x01;
pub const BRKINT: ::tcflag_t = 0x02;
pub const IGNPAR: ::tcflag_t = 0x04;
pub const PARMRK: ::tcflag_t = 0x08;
pub const INPCK: ::tcflag_t = 0x10;
pub const ISTRIP: ::tcflag_t = 0x20;
pub const INLCR: ::tcflag_t = 0x40;
pub const IGNCR: ::tcflag_t = 0x80;
pub const ICRNL: ::tcflag_t = 0x100;
pub const IUCLC: ::tcflag_t = 0x200;
pub const IXON: ::tcflag_t = 0x400;
pub const IXANY: ::tcflag_t = 0x800;
pub const IXOFF: ::tcflag_t = 0x1000;
pub const OPOST: ::tcflag_t = 0x00000001;
pub const OLCUC: ::tcflag_t = 0x00000002;
pub const ONLCR: ::tcflag_t = 0x00000004;
pub const OCRNL: ::tcflag_t = 0x00000008;
pub const ONOCR: ::tcflag_t = 0x00000010;
pub const ONLRET: ::tcflag_t = 0x00000020;
pub const OFILL: ::tcflag_t = 0x00000040;
pub const OFDEL: ::tcflag_t = 0x00000080;
pub const NLDLY: ::tcflag_t = 0x00000100;
pub const NL0: ::tcflag_t = 0x00000000;
pub const NL1: ::tcflag_t = 0x00000100;
pub const CRDLY: ::tcflag_t = 0x00000600;
pub const CR0: ::tcflag_t = 0x00000000;
pub const CR1: ::tcflag_t = 0x00000200;
pub const CR2: ::tcflag_t = 0x00000400;
pub const CR3: ::tcflag_t = 0x00000600;
pub const TABDLY: ::tcflag_t = 0x00001800;
pub const TAB0: ::tcflag_t = 0x00000000;
pub const TAB1: ::tcflag_t = 0x00000800;
pub const TAB2: ::tcflag_t = 0x00001000;
pub const TAB3: ::tcflag_t = 0x00001800;
pub const BSDLY: ::tcflag_t = 0x00002000;
pub const BS0: ::tcflag_t = 0x00000000;
pub const BS1: ::tcflag_t = 0x00002000;
pub const VTDLY: ::tcflag_t = 0x00004000;
pub const VT0: ::tcflag_t = 0x00000000;
pub const VT1: ::tcflag_t = 0x00004000;
pub const FFDLY: ::tcflag_t = 0x00008000;
pub const FF0: ::tcflag_t = 0x00000000;
pub const FF1: ::tcflag_t = 0x00008000;
pub const CSIZE: ::tcflag_t = 0x00000020;
pub const CS5: ::tcflag_t = 0x00000000;
pub const CS6: ::tcflag_t = 0x00000000;
pub const CS7: ::tcflag_t = 0x00000000;
pub const CS8: ::tcflag_t = 0x00000020;
pub const CSTOPB: ::tcflag_t = 0x00000040;
pub const CREAD: ::tcflag_t = 0x00000080;
pub const PARENB: ::tcflag_t = 0x00000100;
pub const PARODD: ::tcflag_t = 0x00000200;
pub const HUPCL: ::tcflag_t = 0x00000400;
pub const CLOCAL: ::tcflag_t = 0x00000800;
pub const XLOBLK: ::tcflag_t = 0x00001000;
pub const CTSFLOW: ::tcflag_t = 0x00002000;
pub const RTSFLOW: ::tcflag_t = 0x00004000;
pub const CRTSCTS: ::tcflag_t = RTSFLOW | CTSFLOW;
pub const ISIG: ::tcflag_t = 0x00000001;
pub const ICANON: ::tcflag_t = 0x00000002;
pub const XCASE: ::tcflag_t = 0x00000004;
pub const ECHO: ::tcflag_t = 0x00000008;
pub const ECHOE: ::tcflag_t = 0x00000010;
pub const ECHOK: ::tcflag_t = 0x00000020;
pub const ECHONL: ::tcflag_t = 0x00000040;
pub const NOFLSH: ::tcflag_t = 0x00000080;
pub const TOSTOP: ::tcflag_t = 0x00000100;
pub const IEXTEN: ::tcflag_t = 0x00000200;
pub const ECHOCTL: ::tcflag_t = 0x00000400;
pub const ECHOPRT: ::tcflag_t = 0x00000800;
pub const ECHOKE: ::tcflag_t = 0x00001000;
pub const FLUSHO: ::tcflag_t = 0x00002000;
pub const PENDIN: ::tcflag_t = 0x00004000;
pub const TCGB_CTS: ::c_int = 0x01;
pub const TCGB_DSR: ::c_int = 0x02;
pub const TCGB_RI: ::c_int = 0x04;
pub const TCGB_DCD: ::c_int = 0x08;
pub const TIOCM_CTS: ::c_int = TCGB_CTS;
pub const TIOCM_CD: ::c_int = TCGB_DCD;
pub const TIOCM_CAR: ::c_int = TIOCM_CD;
pub const TIOCM_RI: ::c_int = TCGB_RI;
pub const TIOCM_DSR: ::c_int = TCGB_DSR;
pub const TIOCM_DTR: ::c_int = 0x10;
pub const TIOCM_RTS: ::c_int = 0x20;
pub const B0: speed_t = 0x00;
pub const B50: speed_t = 0x01;
pub const B75: speed_t = 0x02;
pub const B110: speed_t = 0x03;
pub const B134: speed_t = 0x04;
pub const B150: speed_t = 0x05;
pub const B200: speed_t = 0x06;
pub const B300: speed_t = 0x07;
pub const B600: speed_t = 0x08;
pub const B1200: speed_t = 0x09;
pub const B1800: speed_t = 0x0A;
pub const B2400: speed_t = 0x0B;
pub const B4800: speed_t = 0x0C;
pub const B9600: speed_t = 0x0D;
pub const B19200: speed_t = 0x0E;
pub const B38400: speed_t = 0x0F;
pub const B57600: speed_t = 0x10;
pub const B115200: speed_t = 0x11;
pub const B230400: speed_t = 0x12;
pub const B31250: speed_t = 0x13;
pub const TCSANOW: ::c_int = 0x01;
pub const TCSADRAIN: ::c_int = 0x02;
pub const TCSAFLUSH: ::c_int = 0x04;
pub const TCOOFF: ::c_int = 0x01;
pub const TCOON: ::c_int = 0x02;
pub const TCIOFF: ::c_int = 0x04;
pub const TCION: ::c_int = 0x08;
pub const TCIFLUSH: ::c_int = 0x01;
pub const TCOFLUSH: ::c_int = 0x02;
pub const TCIOFLUSH: ::c_int = 0x03;
f! {
pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] &= !(1 << (fd % size));
return
}
pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
let fd = fd as usize;
let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
}
pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] |= 1 << (fd % size);
return
}
pub fn FD_ZERO(set: *mut fd_set) -> () {
for slot in (*set).fds_bits.iter_mut() {
*slot = 0;
}
}
pub fn WIFEXITED(status: ::c_int) -> bool {
(status & !0xff) == 0
}
pub fn WEXITSTATUS(status: ::c_int) -> ::c_int {
(status & 0xff)
}
pub fn WIFSIGNALED(status: ::c_int) -> bool {
((status >> 8) & 0xff) != 0
}
pub fn WTERMSIG(status: ::c_int) -> ::c_int {
(status >> 8) & 0xff
}
pub fn WIFSTOPPED(status: ::c_int) -> bool {
((status >> 16) & 0xff) != 0
}
pub fn WSTOPSIG(status: ::c_int) -> ::c_int {
(status >> 16) & 0xff
}
// actually WIFCORED, but this is used everywhere else
pub fn WCOREDUMP(status: ::c_int) -> bool {
(status & 0x10000) != 0
}
pub fn WIFCONTINUED(status: ::c_int) -> bool {
(status & 0x20000) != 0
}
}
#[link(name = "bsd")]
extern {
pub fn clock_gettime(clk_id: ::c_int, tp: *mut ::timespec) -> ::c_int;
pub fn clock_settime(clk_id: ::c_int, tp: *const ::timespec) -> ::c_int;
pub fn pthread_create(thread: *mut ::pthread_t,
attr: *const ::pthread_attr_t,
f: extern fn(*mut ::c_void) -> *mut ::c_void,
value: *mut ::c_void) -> ::c_int;
pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t,
guardsize: *mut ::size_t) -> ::c_int;
pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
stackaddr: *mut *mut ::c_void,
stacksize: *mut ::size_t) -> ::c_int;
pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t,
clock_id: *mut clockid_t) -> ::c_int;
pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t,
clock_id: ::clockid_t) -> ::c_int;
pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
pub fn setgroups(ngroups: ::size_t,
ptr: *const ::gid_t) -> ::c_int;
pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int;
pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int)
-> ::c_int;
pub fn dirfd(dirp: *mut ::DIR) -> ::c_int;
pub fn getnameinfo(sa: *const ::sockaddr,
salen: ::socklen_t,
host: *mut ::c_char,
hostlen: ::size_t,
serv: *mut ::c_char,
sevlen: ::size_t,
flags: ::c_int) -> ::c_int;
pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t,
abstime: *const ::timespec) -> ::c_int;
pub fn waitid(idtype: idtype_t, id: id_t, infop: *mut ::siginfo_t,
options: ::c_int) -> ::c_int;
pub fn fdopendir(fd: ::c_int) -> *mut ::DIR;
pub fn glob(pattern: *const ::c_char,
flags: ::c_int,
errfunc: Option<extern fn(epath: *const ::c_char,
errno: ::c_int) -> ::c_int>,
pglob: *mut ::glob_t) -> ::c_int;
pub fn globfree(pglob: *mut ::glob_t);
pub fn posix_madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int)
-> ::c_int;
pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t)
-> ::c_int;
pub fn shm_unlink(name: *const ::c_char) -> ::c_int;
pub fn seekdir(dirp: *mut ::DIR, loc: ::c_long);
pub fn telldir(dirp: *mut ::DIR) -> ::c_long;
pub fn madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int)
-> ::c_int;
pub fn msync(addr: *mut ::c_void, len: ::size_t, flags: ::c_int) -> ::c_int;
pub fn recvfrom(socket: ::c_int, buf: *mut ::c_void, len: ::size_t,
flags: ::c_int, addr: *mut ::sockaddr,
addrlen: *mut ::socklen_t) -> ::ssize_t;
pub fn mkstemps(template: *mut ::c_char, suffixlen: ::c_int) -> ::c_int;
pub fn futimes(fd: ::c_int, times: *const ::timeval) -> ::c_int;
pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int;
pub fn nl_langinfo(item: ::nl_item) -> *mut ::c_char;
pub fn bind(socket: ::c_int, address: *const ::sockaddr,
address_len: ::socklen_t) -> ::c_int;
pub fn writev(fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int) -> ::ssize_t;
pub fn readv(fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int) -> ::ssize_t;
pub fn sendmsg(fd: ::c_int,
msg: *const ::msghdr,
flags: ::c_int) -> ::ssize_t;
pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int)
-> ::ssize_t;
pub fn execvpe(file: *const ::c_char, argv: *const *const ::c_char,
environment: *const *const ::c_char) -> ::c_int;
#[cfg_attr(target_os = "solaris", link_name = "__posix_getgrgid_r")]
pub fn getgrgid_r(uid: ::uid_t,
grp: *mut ::group,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut ::group) -> ::c_int;
#[cfg_attr(all(target_os = "macos", target_arch = "x86"),
link_name = "sigaltstack$UNIX2003")]
#[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")]
pub fn sigaltstack(ss: *const stack_t,
oss: *mut stack_t) -> ::c_int;
pub fn sem_close(sem: *mut sem_t) -> ::c_int;
pub fn getdtablesize() -> ::c_int;
#[cfg_attr(target_os = "solaris", link_name = "__posix_getgrnam_r")]
pub fn getgrnam_r(name: *const ::c_char,
grp: *mut ::group,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut ::group) -> ::c_int;
#[cfg_attr(all(target_os = "macos", target_arch = "x86"),
link_name = "pthread_sigmask$UNIX2003")]
pub fn pthread_sigmask(how: ::c_int, set: *const sigset_t,
oldset: *mut sigset_t) -> ::c_int;
pub fn sem_open(name: *const ::c_char, oflag: ::c_int, ...) -> *mut sem_t;
pub fn getgrnam(name: *const ::c_char) -> *mut ::group;
pub fn pthread_kill(thread: ::pthread_t, sig: ::c_int) -> ::c_int;
pub fn sem_unlink(name: *const ::c_char) -> ::c_int;
pub fn daemon(nochdir: ::c_int, noclose: ::c_int) -> ::c_int;
#[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")]
#[cfg_attr(target_os = "solaris", link_name = "__posix_getpwnam_r")]
pub fn getpwnam_r(name: *const ::c_char,
pwd: *mut passwd,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut passwd) -> ::c_int;
#[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")]
#[cfg_attr(target_os = "solaris", link_name = "__posix_getpwuid_r")]
pub fn getpwuid_r(uid: ::uid_t,
pwd: *mut passwd,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut passwd) -> ::c_int;
#[cfg_attr(all(target_os = "macos", target_arch ="x86"),
link_name = "sigwait$UNIX2003")]
#[cfg_attr(target_os = "solaris", link_name = "__posix_sigwait")]
pub fn sigwait(set: *const sigset_t,
sig: *mut ::c_int) -> ::c_int;
pub fn pthread_atfork(prepare: Option<unsafe extern fn()>,
parent: Option<unsafe extern fn()>,
child: Option<unsafe extern fn()>) -> ::c_int;
pub fn getgrgid(gid: ::gid_t) -> *mut ::group;
#[cfg_attr(all(target_os = "macos", target_arch = "x86"),
link_name = "popen$UNIX2003")]
pub fn popen(command: *const c_char,
mode: *const c_char) -> *mut ::FILE;
pub fn openpty(amaster: *mut ::c_int,
aslave: *mut ::c_int,
name: *mut ::c_char,
termp: *mut termios,
winp: *mut ::winsize) -> ::c_int;
pub fn forkpty(amaster: *mut ::c_int,
name: *mut ::c_char,
termp: *mut termios,
winp: *mut ::winsize) -> ::pid_t;
pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int;
}
cfg_if! {
if #[cfg(target_pointer_width = "64")] {
mod b64;
pub use self::b64::*;
} else {
mod b32;
pub use self::b32::*;
}
}
| 34.233415 | 80 | 0.628771 |
5bd67563ccd257a5dd54365c0af49b76e68ceccb | 935 | use crate::glot_run::config;
use crate::glot_run::api;
use crate::glot_run::language;
use crate::glot_run::datastore;
pub fn handle(config: &config::Config, request: &mut tiny_http::Request) -> Result<api::SuccessResponse, api::ErrorResponse> {
api::check_access_token(&config.api.admin_access_token, request)?;
let language_data: language::LanguageData = api::read_json_body(request)?;
let language = language::new(&language_data);
let data_root = config.server.data_root.lock().unwrap();
datastore::add_entry(&data_root.languages_path(), &language.id, &language)
.map_err(handle_datastore_error)?;
api::prepare_json_response(&language)
}
fn handle_datastore_error(err: datastore::AddError) -> api::ErrorResponse {
api::ErrorResponse{
status_code: 500,
body: api::ErrorBody{
error: "datastore".to_string(),
message: err.to_string(),
}
}
}
| 31.166667 | 126 | 0.690909 |
22abe2f07311c891100ac2a103521e3ccdce9ca4 | 9,668 | use super::opcode::Trapcode;
use std::io::{Read, Write};
const KEYBOARD_STATUS_REGISTER: u16 = 0xFE00;
const KEYBOARD_DATA_REGISTER: u16 = 0xFE02;
const F_POS: u16 = 1;
const F_ZERO: u16 = 1 << 1;
const F_NEG: u16 = 1 << 2;
#[derive(Debug)]
pub struct State {
/// Array of registers R0 through R7
pub registers: [u16; 8],
/// Program counter
pub pc: u16,
/// Condition flags, only the first three bits are relevant
pub cflags: u16,
/// Array holding the entire memory
pub memory: [u16; u16::MAX as usize],
/// Whether the vm is running or not
pub running: bool,
}
impl State {
pub fn new() -> State {
State {
registers: [0; 8],
pc: 0x300,
cflags: 0,
memory: [0; u16::MAX as usize],
running: true,
}
}
pub fn add(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let sr1 = (instruction >> 6) & 0x07;
if ((instruction >> 5) & 0x1) == 0 {
let sr2 = instruction & 0x07;
self.registers[dr as usize] =
self.registers[sr1 as usize] + self.registers[sr2 as usize];
} else {
let imm5 = sign_extend(instruction & 0x1F, 5);
self.registers[dr as usize] = self.registers[sr1 as usize] + imm5;
}
self.update_flags(dr);
}
pub fn and(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let sr1 = (instruction >> 6) & 0x07;
if ((instruction >> 5) & 0x1) == 0 {
let sr2 = instruction & 0x07;
self.registers[dr as usize] =
self.registers[sr1 as usize] & self.registers[sr2 as usize];
} else {
let imm5 = sign_extend(instruction & 0x1F, 5);
self.registers[dr as usize] = self.registers[sr1 as usize] & imm5;
}
self.update_flags(dr);
}
pub fn conditional_branch(&mut self, instruction: u16) {
let condition_flag = (instruction >> 9) & 0x07;
if (condition_flag & self.cflags) != 0 {
let offset = sign_extend(instruction & 0x01FF, 9);
self.pc += offset;
}
}
pub fn jump(&mut self, instruction: u16) {
let base_register = (instruction >> 6) & 0x07;
self.pc = self.registers[base_register as usize];
}
pub fn jump_to_subroutine(&mut self, instruction: u16) {
self.registers[7] = self.pc;
if ((instruction >> 11) & 0x1) == 0 {
let base_register = (instruction >> 6) & 0x07;
self.pc = self.registers[base_register as usize];
} else {
let offset = sign_extend(instruction & 0x07FF, 11);
self.pc += offset;
}
}
pub fn load(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let offset = sign_extend(instruction & 0x01FF, 9);
self.registers[dr as usize] = self.mem_read(self.pc + offset);
self.update_flags(dr);
}
pub fn load_indirect(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let offset = sign_extend(instruction & 0x01FF, 9);
let address = self.mem_read(self.pc + offset);
self.registers[dr as usize] = self.mem_read(address);
self.update_flags(dr);
}
pub fn load_base_plus_offset(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let base_register = (instruction >> 6) & 0x07;
let offset = sign_extend(instruction & 0x3F, 6);
self.registers[dr as usize] =
self.mem_read(self.registers[base_register as usize] + offset);
self.update_flags(dr);
}
pub fn load_effective_address(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let offset = sign_extend(instruction & 0x01FF, 9);
self.registers[dr as usize] = self.pc + offset;
self.update_flags(dr);
}
pub fn not(&mut self, instruction: u16) {
let dr = get_dr(instruction);
let sr = (instruction >> 6) & 0x07;
self.registers[dr as usize] = !self.registers[sr as usize];
self.update_flags(dr);
}
pub fn store(&mut self, instruction: u16) {
let sr = (instruction >> 9) & 0x07;
let offset = sign_extend(instruction & 0x01FF, 9);
self.mem_set(self.pc + offset, self.registers[sr as usize]);
}
pub fn store_indirect(&mut self, instruction: u16) {
let sr = (instruction >> 9) & 0x07;
let offset = sign_extend(instruction & 0x01FF, 9);
let address = self.mem_read(self.pc + offset);
self.mem_set(address, self.registers[sr as usize]);
}
pub fn store_base_plus_offset(&mut self, instruction: u16) {
let sr = (instruction >> 9) & 0x07;
let base_register = (instruction >> 6) & 0x07;
let offset = sign_extend(instruction & 0x3F, 6);
self.mem_set(
self.registers[base_register as usize] + offset,
self.registers[sr as usize],
);
}
pub fn trap(&mut self, instruction: u16) {
let trap_code = instruction & 0xFF;
match trap_code.try_into() {
Ok(Trapcode::GETC) => self.getc(),
Ok(Trapcode::OUT) => self.out(),
Ok(Trapcode::PUTS) => self.puts(),
Ok(Trapcode::IN) => self.input(),
Ok(Trapcode::PUTSP) => self.putsp(),
Ok(Trapcode::HALT) => self.halt(),
_ => panic!(
"Unexpected trap code. Code: {}\nRegisters: {:?}\nPc: 0x{:x}\nZF: {}\nNF: {}\nPF: {}\n",
trap_code, self.registers, self.pc, (self.cflags >> 1) & 0x1, (self.cflags >>2) & 0x1, self.cflags & 0x1
),
}
}
pub fn illegal_opcode(&self) {
panic!("Illegal opcode encountered")
}
fn getc(&mut self) {
let input = get_char();
self.registers[0] = input as u16;
}
fn out(&self) {
let value = self.registers[0] as u8;
print!("{}", value as char);
std::io::stdout().flush().unwrap();
}
fn puts(&mut self) {
let mut index = self.registers[0];
loop {
let next_char = self.mem_read(index) as u8;
if next_char == 0 {
break;
}
print!("{}", next_char as char);
index += 1;
}
std::io::stdout().flush().unwrap();
}
fn input(&mut self) {
print!("Enter a character: ");
std::io::stdout().flush().unwrap();
let input = get_char();
self.registers[0] = input as u16;
print!("{}", input as char);
}
fn putsp(&mut self) {
let mut index = self.registers[0];
loop {
let next_word = self.mem_read(index);
let low = (next_word & 0xFF) as u8;
let high = (next_word >> 8) as u8;
if low == 0 {
break;
}
print!("{}", low as char);
if high == 0 {
break;
}
print!("{}", high as char);
index += 1;
}
std::io::stdout().flush().unwrap();
}
fn halt(&mut self) {
std::io::stdout().flush().unwrap();
self.running = false;
}
pub fn mem_read(&mut self, address: u16) -> u16 {
// The way the keyboard status and data registers would be used normally
// is: whenever the user presses a key, the keyboard
// sets the status register's highest bit to one and the value of the
// key pressed into the data register; but that's not what MY keyboard
// will actually do when I press a button, so I have to emulate it.
// The way we do it is the following: when the executing program wants to read
// the status register, we check if a key has been pressed in the past; if it has, we set
// the status register's highest bit to one and its value to the data register.
// Otherwise we just set the status register to zero.
if address == KEYBOARD_STATUS_REGISTER {
if check_key() {
self.memory[KEYBOARD_STATUS_REGISTER as usize] = 1 << 15;
self.memory[KEYBOARD_DATA_REGISTER as usize] = get_char() as u16;
} else {
self.memory[KEYBOARD_STATUS_REGISTER as usize] = 0;
}
}
return self.memory[address as usize];
}
fn mem_set(&mut self, address: u16, value: u16) {
self.memory[address as usize] = value;
}
fn update_flags(&mut self, register: u16) {
let value = self.registers[register as usize];
if value == 0 {
self.cflags = F_ZERO;
} else if (value >> 15) == 1 {
self.cflags = F_NEG;
} else {
self.cflags = F_POS;
}
}
}
fn sign_extend(value: u16, bit_count: u16) -> u16 {
if ((value >> (bit_count - 1)) & 1) == 1 {
value | (0xFFFF << bit_count)
} else {
value
}
}
fn check_key() -> bool {
let mut readfds = nix::sys::select::FdSet::new();
readfds.insert(0);
let mut timeout: nix::sys::time::TimeVal = nix::sys::time::TimeValLike::seconds(0);
return nix::sys::select::select(1, &mut readfds, None, None, &mut timeout).unwrap() != 0;
}
fn get_dr(instruction: u16) -> u16 {
return (instruction >> 9) & 0x7;
}
fn get_char() -> u8 {
let input = std::io::stdin()
.bytes()
.next()
.and_then(|result| result.ok())
.map(|byte| byte as u8);
match input {
Some(value) => value,
_ => panic!("Error reading from stdin"),
}
}
| 30.11838 | 120 | 0.546132 |
fe8d5091d97dc07deb7e6cff41b61de942a20144 | 3,101 | use futures::{Future, FutureExt, TryFutureExt};
use juniper_subscriptions::Coordinator;
use juniper_warp::subscriptions::graphql_subscriptions;
use slog::{error, info, o, Drain, Logger};
use std::{pin::Pin, sync::Arc};
use warp::{self, Filter};
use mjolnir::{self, error, gql};
#[tokio::main]
async fn main() {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
let log = slog::Logger::root(drain, o!());
run(log).await;
}
async fn run(log: Logger) {
run_error(log.clone()).await.unwrap_or_else(|err| {
error!(log, "{}", err);
})
}
async fn run_error(log: Logger) -> Result<(), error::Error> {
let root_logger = log.new(o!());
mjolnir::read_dotenv(log).await?;
let pool = mjolnir::get_connstr(root_logger.clone())
.and_then(|connstr| mjolnir::connect_db(connstr, root_logger.clone()))
.await?;
let logger1 = root_logger.clone();
let pool1 = pool.clone();
let state = warp::any().map(move || gql::Context {
pool: pool1.clone(),
logger: logger1.clone(),
});
let graphiql = warp::path("graphiql")
.and(warp::path::end())
.and(warp::get())
.and(juniper_warp::graphiql_filter("/graphql", None));
let graphql_filter = juniper_warp::make_graphql_filter(gql::schema(), state.boxed());
/* This is ApiRoutes.Base */
let graphql = warp::path!("graphql").and(graphql_filter);
let logger2 = root_logger.clone();
let pool2 = pool.clone();
let substate = warp::any().map(move || gql::Context {
pool: pool2.clone(),
logger: logger2.clone(),
});
let coordinator = Arc::new(juniper_subscriptions::Coordinator::new(gql::schema()));
let notifications = (warp::path("notifications")
.and(warp::ws())
.and(substate.clone())
.and(warp::any().map(move || Arc::clone(&coordinator)))
.map(
|ws: warp::ws::Ws,
context: gql::Context,
coordinator: Arc<Coordinator<'static, _, _, _, _, _>>| {
ws.on_upgrade(|websocket| -> Pin<Box<dyn Future<Output = ()> + Send>> {
println!("On upgrade");
graphql_subscriptions(websocket, coordinator, context)
.map(|r| {
println!("r: {:?}", r);
if let Err(err) = r {
println!("Websocket Error: {}", err);
}
})
.boxed()
})
},
))
.map(|reply| warp::reply::with_header(reply, "Sec-Websocket-Protocol", "graphql-ws"));
let index = warp::fs::file("dist/index.html");
let dir = warp::fs::dir("dist");
let routes = graphiql.or(graphql).or(notifications).or(dir).or(index);
info!(root_logger.clone(), "Serving Mjolnir on 127.0.0.1:3030");
warp::serve(routes).run(([127, 0, 0, 1], 3030)).await;
Ok(())
}
| 32.989362 | 90 | 0.556917 |
1acb6ac007a4fb647835696b8828e28b6a74edde | 30,533 | use crate::error_pages::ErrorPageData;
use crate::errors::*;
use crate::i18n::ClientTranslationsManager;
use crate::router::{RouteVerdict, RouterLoadState, RouterState};
use crate::server::PageData;
use crate::state::PageStateStore;
use crate::state::{FrozenApp, GlobalState, ThawPrefs};
use crate::template::{PageProps, Template, TemplateNodeType};
use crate::utils::get_path_prefix_client;
use crate::ErrorPages;
use fmterr::fmt_err;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::rc::Rc;
use sycamore::prelude::*;
use sycamore::rt::Reflect; // We can piggyback off Sycamore to avoid bringing in `js_sys`
use wasm_bindgen::{JsCast, JsValue};
use wasm_bindgen_futures::JsFuture;
use web_sys::{Element, Request, RequestInit, RequestMode, Response};
/// Fetches the given resource. This should NOT be used by end users, but it's required by the CLI.
#[doc(hidden)]
pub async fn fetch(url: &str) -> Result<Option<String>, ClientError> {
let js_err_handler = |err: JsValue| ClientError::Js(format!("{:?}", err));
let mut opts = RequestInit::new();
opts.method("GET").mode(RequestMode::Cors);
let request = Request::new_with_str_and_init(url, &opts).map_err(js_err_handler)?;
let window = web_sys::window().unwrap();
// Get the response as a future and await it
let res_value = JsFuture::from(window.fetch_with_request(&request))
.await
.map_err(js_err_handler)?;
// Turn that into a proper response object
let res: Response = res_value.dyn_into().unwrap();
// If the status is 404, we should return that the request worked but no file existed
if res.status() == 404 {
return Ok(None);
}
// Get the body thereof
let body_promise = res.text().map_err(js_err_handler)?;
let body = JsFuture::from(body_promise).await.map_err(js_err_handler)?;
// Convert that into a string (this will be `None` if it wasn't a string in the JS)
let body_str = body.as_string();
let body_str = match body_str {
Some(body_str) => body_str,
None => {
return Err(FetchError::NotString {
url: url.to_string(),
}
.into())
}
};
// Handle non-200 error codes
if res.status() == 200 {
Ok(Some(body_str))
} else {
Err(FetchError::NotOk {
url: url.to_string(),
status: res.status(),
err: body_str,
}
.into())
}
}
/// Gets the render configuration from the JS global variable `__PERSEUS_RENDER_CFG`, which should be inlined by the server. This will
/// return `None` on any error (not found, serialization failed, etc.), which should reasonably lead to a `panic!` in the caller.
pub fn get_render_cfg() -> Option<HashMap<String, String>> {
let val_opt = web_sys::window().unwrap().get("__PERSEUS_RENDER_CFG");
let js_obj = match val_opt {
Some(js_obj) => js_obj,
None => return None,
};
// The object should only actually contain the string value that was injected
let cfg_str = match js_obj.as_string() {
Some(cfg_str) => cfg_str,
None => return None,
};
let render_cfg = match serde_json::from_str::<HashMap<String, String>>(&cfg_str) {
Ok(render_cfg) => render_cfg,
Err(_) => return None,
};
Some(render_cfg)
}
/// Gets the initial state injected by the server, if there was any. This is used to differentiate initial loads from subsequent ones,
/// which have different log chains to prevent double-trips (a common SPA problem).
pub fn get_initial_state() -> InitialState {
let val_opt = web_sys::window().unwrap().get("__PERSEUS_INITIAL_STATE");
let js_obj = match val_opt {
Some(js_obj) => js_obj,
None => return InitialState::NotPresent,
};
// The object should only actually contain the string value that was injected
let state_str = match js_obj.as_string() {
Some(state_str) => state_str,
None => return InitialState::NotPresent,
};
// On the server-side, we encode a `None` value directly (otherwise it will be some convoluted stringified JSON)
if state_str == "None" {
InitialState::Present(None)
} else if state_str.starts_with("error-") {
// We strip the prefix and escape any tab/newline control characters (inserted by `fmterr`)
// Any others are user-inserted, and this is documented
let err_page_data_str = state_str
.strip_prefix("error-")
.unwrap()
.replace("\n", "\\n")
.replace("\t", "\\t");
// There will be error page data encoded after `error-`
let err_page_data = match serde_json::from_str::<ErrorPageData>(&err_page_data_str) {
Ok(render_cfg) => render_cfg,
// If there's a serialization error, we'll create a whole new error (500)
Err(err) => ErrorPageData {
url: "[current]".to_string(),
status: 500,
err: format!("couldn't serialize error from server: '{}'", err),
},
};
InitialState::Error(err_page_data)
} else {
InitialState::Present(Some(state_str))
}
}
/// Gets the global state injected by the server, if there was any. If there are errors in this, we can return `None` and not worry about it, they'll be handled by the initial state.
pub fn get_global_state() -> Option<String> {
let val_opt = web_sys::window().unwrap().get("__PERSEUS_GLOBAL_STATE");
let js_obj = match val_opt {
Some(js_obj) => js_obj,
None => return None,
};
// The object should only actually contain the string value that was injected
let state_str = match js_obj.as_string() {
Some(state_str) => state_str,
None => return None,
};
// On the server-side, we encode a `None` value directly (otherwise it will be some convoluted stringified JSON)
match state_str.as_str() {
"None" => None,
state_str => Some(state_str.to_string()),
}
}
/// Marks a checkpoint in the code and alerts any tests that it's been reached by creating an element that represents it. The preferred
/// solution would be emitting a DOM event, but the WebDriver specification currently doesn't support waiting on those (go figure). This
/// will only create a custom element if the `__PERSEUS_TESTING` JS global variable is set to `true`.
///
/// This adds a `<div id="__perseus_checkpoint-<event-name>" />` to the `<div id="__perseus_checkpoints"></div>` element, creating the
/// latter if it doesn't exist. Each checkpoint must have a unique name, and if the same checkpoint is executed twice, it'll be added
/// with a `-<number>` after it, starting from `0`. In this way, we have a functional checkpoints queue for signalling to test code!
/// Note that the checkpoint queue is NOT cleared on subsequent loads.
///
/// Note: this is not just for internal usage, it's highly recommended that you use this for your own checkpoints as well! Just make
/// sure your tests don't conflict with any internal Perseus checkpoint names (preferably prefix yours with `custom-` or the like, as
/// Perseus' checkpoints may change at any time, but won't ever use that namespace).
///
/// WARNING: your checkpoint names must not include hyphens! This will result in a `panic!`.
pub fn checkpoint(name: &str) {
if name.contains('-') {
panic!("checkpoint must not contain hyphens, use underscores instead (hyphens are used as an internal delimiter)");
}
let val_opt = web_sys::window().unwrap().get("__PERSEUS_TESTING");
let js_obj = match val_opt {
Some(js_obj) => js_obj,
None => return,
};
// The object should only actually contain the string value that was injected
let is_testing = match js_obj.as_bool() {
Some(cfg_str) => cfg_str,
None => return,
};
if !is_testing {
return;
}
// If we're here, we're testing
// We dispatch a console warning to reduce the likelihood of literal 'testing in prod'
crate::web_log!("Perseus is in testing mode. If you're an end-user and seeing this message, please report this as a bug to the website owners!");
// Create a custom element that can be waited for by the WebDriver
// This will be removed by the next checkpoint
let document = web_sys::window().unwrap().document().unwrap();
let container_opt = document.query_selector("#__perseus_checkpoints").unwrap();
let container = match container_opt {
Some(container_i) => container_i,
None => {
// If the container doesn't exist yet, create it
let container = document.create_element("div").unwrap();
container.set_id("__perseus_checkpoints");
document
.query_selector("body")
.unwrap()
.unwrap()
.append_with_node_1(&container)
.unwrap();
container
}
};
// Get the number of checkpoints that already exist with the same ID
// We prevent having to worry about checkpoints whose names are subsets of others by using the hyphen as a delimiter
let num_checkpoints = document
.query_selector_all(&format!("[id^=__perseus_checkpoint-{}-]", name))
.unwrap()
.length();
// Append the new checkpoint
let checkpoint = document.create_element("div").unwrap();
checkpoint.set_id(&format!(
"__perseus_checkpoint-{}-{}",
name, num_checkpoints
));
container.append_with_node_1(&checkpoint).unwrap();
}
/// A representation of whether or not the initial state was present. If it was, it could be `None` (some templates take no state), and
/// if not, then this isn't an initial load, and we need to request the page from the server. It could also be an error that the server
/// has rendered.
#[derive(Debug)]
pub enum InitialState {
/// A non-error initial state has been injected.
Present(Option<String>),
/// An initial state ahs been injected that indicates an error.
Error(ErrorPageData),
/// No initial state has been injected (or if it has, it's been deliberately unset).
NotPresent,
}
/// Properties for the app shell. These should be constructed literally when working with the app shell.
#[derive(Debug)]
pub struct ShellProps {
/// The path we're rendering for (not the template path, the full path, though parsed a little).
pub path: String,
/// The template to render for.
pub template: Rc<Template<TemplateNodeType>>,
/// Whether or not the router returned an incremental match (if this page exists on a template using incremental generation and it wasn't defined at build time).
pub was_incremental_match: bool,
/// The locale we're rendering in.
pub locale: String,
/// The router state.
pub router_state: RouterState,
/// The template state store.
pub page_state_store: PageStateStore,
/// A *client-side* translations manager to use (this manages caching translations).
pub translations_manager: Rc<RefCell<ClientTranslationsManager>>,
/// The error pages, for use if something fails.
pub error_pages: Rc<ErrorPages<DomNode>>,
/// The container responsible for the initial render from the server (non-interactive, this may need to be wiped).
pub initial_container: Element,
/// The container for reactive content.
pub container_rx_elem: Element,
/// The global state store. Brekaing it out here prevents it being overriden every time a new template loads.
pub global_state: GlobalState,
/// A previous frozen state to be gradully rehydrated. This should always be `None`, it only serves to provide continuity across templates.
pub frozen_app: Rc<RefCell<Option<(FrozenApp, ThawPrefs)>>>,
/// The current route verdict. This will be stored in context so that it can be used for possible reloads. Eventually,
/// this will be made obsolete when Sycamore supports this natively.
pub route_verdict: RouteVerdict<TemplateNodeType>,
/// Whether or not this page is the very first to have been rendered since the browser loaded the app.
pub is_first: Rc<Cell<bool>>,
#[cfg(all(feature = "live-reload", debug_assertions))]
/// An indicator `Signal` used to allow the root to instruct the app that we're about to reload because of an instruction from the live reloading server.
pub live_reload_indicator: ReadSignal<bool>,
}
/// Fetches the information for the given page and renders it. This should be provided the actual path of the page to render (not just the
/// broader template). Asynchronous Wasm is handled here, because only a few cases need it.
// TODO handle exceptions higher up
pub async fn app_shell(
ShellProps {
path,
template,
was_incremental_match,
locale,
mut router_state,
page_state_store,
translations_manager,
error_pages,
initial_container,
container_rx_elem,
global_state: curr_global_state,
frozen_app,
route_verdict,
is_first,
#[cfg(all(feature = "live-reload", debug_assertions))]
live_reload_indicator,
}: ShellProps,
) {
checkpoint("app_shell_entry");
let path_with_locale = match locale.as_str() {
"xx-XX" => path.clone(),
locale => format!("{}/{}", locale, &path),
};
// Update the router state
router_state.set_load_state(RouterLoadState::Loading {
template_name: template.get_path(),
path: path_with_locale.clone(),
});
router_state.set_last_verdict(route_verdict);
// Get the global state if possible (we'll want this in all cases except errors)
// If this is a subsequent load, the template macro will have already set up the global state, and it will ignore whatever we naively give it (so we'll give it `None`)
let global_state = get_global_state();
// Check if this was an initial load and we already have the state
let initial_state = get_initial_state();
match initial_state {
// If we do have an initial state, then we have everything we need for immediate hydration (no double trips)
// The state is here, and the HTML has already been injected for us (including head metadata)
InitialState::Present(state) => {
checkpoint("initial_state_present");
// Unset the initial state variable so we perform subsequent renders correctly
// This monstrosity is needed until `web-sys` adds a `.set()` method on `Window`
// We don't do this for the global state because it should hang around uninitialized until a template wants it (if we remove it before then, we're stuffed)
Reflect::set(
&JsValue::from(web_sys::window().unwrap()),
&JsValue::from("__PERSEUS_INITIAL_STATE"),
&JsValue::undefined(),
)
.unwrap();
// We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly)
let initial_html = initial_container.inner_html();
container_rx_elem.set_inner_html(&initial_html);
initial_container.set_inner_html("");
// Make the initial container invisible
initial_container
.set_attribute("style", "display: none;")
.unwrap();
checkpoint("page_visible");
// Now that the user can see something, we can get the translator
let mut translations_manager_mut = translations_manager.borrow_mut();
// This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations
let translator = translations_manager_mut
.get_translator_for_locale(&locale)
.await;
let translator = match translator {
Ok(translator) => translator,
Err(err) => {
// Directly eliminate the HTML sent in from the server before we render an error page
container_rx_elem.set_inner_html("");
match &err {
// These errors happen because we couldn't get a translator, so they certainly don't get one
ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, *status, &fmt_err(&err), None, &container_rx_elem),
ClientError::FetchError(FetchError::SerFailed { url, .. }) => return error_pages.render_page(url, 500, &fmt_err(&err), None, &container_rx_elem),
ClientError::LocaleNotSupported { .. } => return error_pages.render_page(&format!("/{}/...", locale), 404, &fmt_err(&err), None, &container_rx_elem),
// No other errors should be returned
_ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error")
}
}
};
let path = template.get_path();
// Hydrate that static code using the acquired state
let router_state_2 = router_state.clone();
// BUG (Sycamore): this will double-render if the component is just text (no nodes)
let page_props = PageProps {
path: path_with_locale.clone(),
state,
global_state,
};
#[cfg(not(feature = "hydrate"))]
{
// If we aren't hydrating, we'll have to delete everything and re-render
container_rx_elem.set_inner_html("");
sycamore::render_to(
move || {
template.render_for_template_client(
page_props,
translator,
false,
router_state_2,
page_state_store,
curr_global_state,
frozen_app,
is_first,
#[cfg(all(feature = "live-reload", debug_assertions))]
live_reload_indicator,
)
},
&container_rx_elem,
);
}
#[cfg(feature = "hydrate")]
sycamore::hydrate_to(
// This function provides translator context as needed
|| {
template.render_for_template_client(
page_props,
translator,
false,
router_state_2,
page_state_store,
curr_global_state,
frozen_app,
is_first,
#[cfg(all(feature = "live-reload", debug_assertions))]
live_reload_indicator,
)
},
&container_rx_elem,
);
checkpoint("page_interactive");
// Update the router state
router_state.set_load_state(RouterLoadState::Loaded {
template_name: path,
path: path_with_locale,
});
}
// If we have no initial state, we should proceed as usual, fetching the content and state from the server
InitialState::NotPresent => {
checkpoint("initial_state_not_present");
// If we're getting data about the index page, explicitly set it to that
// This can be handled by the Perseus server (and is), but not by static exporting
let path = match path.is_empty() {
true => "index".to_string(),
false => path,
};
// Get the static page data
let asset_url = format!(
"{}/.perseus/page/{}/{}.json?template_name={}&was_incremental_match={}",
get_path_prefix_client(),
locale,
path,
template.get_path(),
was_incremental_match
);
// If this doesn't exist, then it's a 404 (we went here by explicit navigation, but it may be an unservable ISR page or the like)
let page_data_str = fetch(&asset_url).await;
match page_data_str {
Ok(page_data_str) => match page_data_str {
Some(page_data_str) => {
// All good, deserialize the page data
let page_data = serde_json::from_str::<PageData>(&page_data_str);
match page_data {
Ok(page_data) => {
// We have the page data ready, render everything
// Interpolate the HTML directly into the document (we'll hydrate it later)
container_rx_elem.set_inner_html(&page_data.content);
// Interpolate the metadata directly into the document's `<head>`
// Get the current head
let head_elem = web_sys::window()
.unwrap()
.document()
.unwrap()
.query_selector("head")
.unwrap()
.unwrap();
let head_html = head_elem.inner_html();
// We'll assume that there's already previously interpolated head in addition to the hardcoded stuff, but it will be separated by the server-injected delimiter comment
// Thus, we replace the stuff after that delimiter comment with the new head
let head_parts: Vec<&str> = head_html
.split("<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->")
.collect();
let new_head = format!(
"{}\n<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->\n{}",
head_parts[0], &page_data.head
);
head_elem.set_inner_html(&new_head);
checkpoint("page_visible");
// Now that the user can see something, we can get the translator
let mut translations_manager_mut =
translations_manager.borrow_mut();
// This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations
let translator = translations_manager_mut
.get_translator_for_locale(&locale)
.await;
let translator = match translator {
Ok(translator) => translator,
Err(err) => match &err {
// These errors happen because we couldn't get a translator, so they certainly don't get one
ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, *status, &fmt_err(&err), None, &container_rx_elem),
ClientError::FetchError(FetchError::SerFailed { url, .. }) => return error_pages.render_page(url, 500, &fmt_err(&err), None, &container_rx_elem),
ClientError::LocaleNotSupported { locale } => return error_pages.render_page(&format!("/{}/...", locale), 404, &fmt_err(&err), None, &container_rx_elem),
// No other errors should be returned
_ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error")
}
};
// Hydrate that static code using the acquired state
let router_state_2 = router_state.clone();
// BUG (Sycamore): this will double-render if the component is just text (no nodes)
let page_props = PageProps {
path: path_with_locale.clone(),
state: page_data.state,
global_state,
};
let template_name = template.get_path();
#[cfg(not(feature = "hydrate"))]
{
// If we aren't hydrating, we'll have to delete everything and re-render
container_rx_elem.set_inner_html("");
sycamore::render_to(
move || {
template.render_for_template_client(
page_props,
translator,
false,
router_state_2.clone(),
page_state_store,
curr_global_state,
frozen_app,
is_first,
#[cfg(all(
feature = "live-reload",
debug_assertions
))]
live_reload_indicator,
)
},
&container_rx_elem,
);
}
#[cfg(feature = "hydrate")]
sycamore::hydrate_to(
// This function provides translator context as needed
move || {
template.render_for_template_client(
page_props,
translator,
false,
router_state_2,
page_state_store,
curr_global_state,
frozen_app,
is_first,
#[cfg(all(feature = "live-reload", debug_assertions))]
live_reload_indicator,
)
},
&container_rx_elem,
);
checkpoint("page_interactive");
// Update the router state
router_state.set_load_state(RouterLoadState::Loaded {
template_name,
path: path_with_locale,
});
}
// If the page failed to serialize, an exception has occurred
Err(err) => panic!("page data couldn't be serialized: '{}'", err),
};
}
// No translators ready yet
None => error_pages.render_page(
&asset_url,
404,
"page not found",
None,
&container_rx_elem,
),
},
Err(err) => match &err {
// No translators ready yet
ClientError::FetchError(FetchError::NotOk { url, status, .. }) => error_pages
.render_page(url, *status, &fmt_err(&err), None, &container_rx_elem),
// No other errors should be returned
_ => panic!("expected 'AssetNotOk' error, found other unacceptable error"),
},
};
}
// Nothing should be done if an error was sent down
InitialState::Error(ErrorPageData { url, status, err }) => {
checkpoint("initial_state_error");
// We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly)
// If we're not hydrating, there's no point in moving anything over, we'll just fully re-render
#[cfg(feature = "hydrate")]
{
let initial_html = initial_container.inner_html();
container_rx_elem.set_inner_html(&initial_html);
}
initial_container.set_inner_html("");
// Make the initial container invisible
initial_container
.set_attribute("style", "display: none;")
.unwrap();
// Hydrate the currently static error page
// Right now, we don't provide translators to any error pages that have come from the server
// We render this rather than hydrating because otherwise we'd need a `HydrateNode` at the plugins level, which is way too inefficient
#[cfg(not(feature = "hydrate"))]
container_rx_elem.set_inner_html("");
error_pages.render_page(&url, status, &err, None, &container_rx_elem);
}
};
}
| 51.83871 | 199 | 0.552517 |
fea8bd5593cb6926dd26fde0352a66a90fb87d70 | 11,199 | use cosmwasm_std::{
to_binary, BankMsg, Binary, Context, Deps, DepsMut, Env, HandleResponse, HumanAddr,
InitResponse, MessageInfo, StdResult,
};
use crate::error::ContractError;
use crate::msg::{ConfigResponse, HandleMsg, InitMsg, QueryMsg};
use crate::state::{config, config_read, State};
pub fn init(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: InitMsg,
) -> Result<InitResponse, ContractError> {
if msg.expires <= env.block.height {
return Err(ContractError::OptionExpired {
expired: msg.expires,
});
}
let state = State {
creator: info.sender.clone(),
owner: info.sender.clone(),
collateral: info.sent_funds,
counter_offer: msg.counter_offer,
expires: msg.expires,
};
config(deps.storage).save(&state)?;
Ok(InitResponse::default())
}
pub fn handle(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: HandleMsg,
) -> Result<HandleResponse, ContractError> {
match msg {
HandleMsg::Transfer { recipient } => handle_transfer(deps, env, info, recipient),
HandleMsg::Execute {} => handle_execute(deps, env, info),
HandleMsg::Burn {} => handle_burn(deps, env, info),
}
}
pub fn handle_transfer(
deps: DepsMut,
_env: Env,
info: MessageInfo,
recipient: HumanAddr,
) -> Result<HandleResponse, ContractError> {
// ensure msg sender is the owner
let mut state = config(deps.storage).load()?;
if info.sender != state.owner {
return Err(ContractError::Unauthorized {});
}
// set new owner on state
state.owner = recipient.clone();
config(deps.storage).save(&state)?;
let mut res = Context::new();
res.add_attribute("action", "transfer");
res.add_attribute("owner", recipient);
Ok(res.into())
}
pub fn handle_execute(
deps: DepsMut,
env: Env,
info: MessageInfo,
) -> Result<HandleResponse, ContractError> {
// ensure msg sender is the owner
let state = config(deps.storage).load()?;
if info.sender != state.owner {
return Err(ContractError::Unauthorized {});
}
// ensure not expired
if env.block.height >= state.expires {
return Err(ContractError::OptionExpired {
expired: state.expires,
});
}
// ensure sending proper counter_offer
if info.sent_funds != state.counter_offer {
return Err(ContractError::CounterOfferMismatch {
offer: info.sent_funds,
counter_offer: state.counter_offer,
});
}
// release counter_offer to creator
let mut res = Context::new();
res.add_message(BankMsg::Send {
from_address: env.contract.address.clone(),
to_address: state.creator,
amount: state.counter_offer,
});
// release collateral to sender
res.add_message(BankMsg::Send {
from_address: env.contract.address,
to_address: state.owner,
amount: state.collateral,
});
// delete the option
config(deps.storage).remove();
res.add_attribute("action", "execute");
Ok(res.into())
}
pub fn handle_burn(
deps: DepsMut,
env: Env,
info: MessageInfo,
) -> Result<HandleResponse, ContractError> {
// ensure is expired
let state = config(deps.storage).load()?;
if env.block.height < state.expires {
return Err(ContractError::OptionNotExpired {
expires: state.expires,
});
}
// ensure sending proper counter_offer
if !info.sent_funds.is_empty() {
return Err(ContractError::FundsSentWithBurn {});
}
// release collateral to creator
let mut res = Context::new();
res.add_message(BankMsg::Send {
from_address: env.contract.address,
to_address: state.creator,
amount: state.collateral,
});
// delete the option
config(deps.storage).remove();
res.add_attribute("action", "burn");
Ok(res.into())
}
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::Config {} => to_binary(&query_config(deps)?),
}
}
fn query_config(deps: Deps) -> StdResult<ConfigResponse> {
let state = config_read(deps.storage).load()?;
Ok(state)
}
#[cfg(test)]
mod tests {
use super::*;
use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info, MOCK_CONTRACT_ADDR};
use cosmwasm_std::{attr, coins, CosmosMsg};
#[test]
fn proper_initialization() {
let mut deps = mock_dependencies(&[]);
let msg = InitMsg {
counter_offer: coins(40, "ETH"),
expires: 100_000,
};
let info = mock_info("creator", &coins(1, "BTC"));
// we can just call .unwrap() to assert this was a success
let res = init(deps.as_mut(), mock_env(), info, msg).unwrap();
assert_eq!(0, res.messages.len());
// it worked, let's query the state
let res = query_config(deps.as_ref()).unwrap();
assert_eq!(100_000, res.expires);
assert_eq!("creator", res.owner.as_str());
assert_eq!("creator", res.creator.as_str());
assert_eq!(coins(1, "BTC"), res.collateral);
assert_eq!(coins(40, "ETH"), res.counter_offer);
}
#[test]
fn transfer() {
let mut deps = mock_dependencies(&[]);
let msg = InitMsg {
counter_offer: coins(40, "ETH"),
expires: 100_000,
};
let info = mock_info("creator", &coins(1, "BTC"));
// we can just call .unwrap() to assert this was a success
let res = init(deps.as_mut(), mock_env(), info, msg).unwrap();
assert_eq!(0, res.messages.len());
// random cannot transfer
let info = mock_info("anyone", &[]);
let err = handle_transfer(deps.as_mut(), mock_env(), info, HumanAddr::from("anyone"))
.unwrap_err();
match err {
ContractError::Unauthorized {} => {}
e => panic!("unexpected error: {}", e),
}
// owner can transfer
let info = mock_info("creator", &[]);
let res =
handle_transfer(deps.as_mut(), mock_env(), info, HumanAddr::from("someone")).unwrap();
assert_eq!(res.attributes.len(), 2);
assert_eq!(res.attributes[0], attr("action", "transfer"));
// check updated properly
let res = query_config(deps.as_ref()).unwrap();
assert_eq!("someone", res.owner.as_str());
assert_eq!("creator", res.creator.as_str());
}
#[test]
fn execute() {
let mut deps = mock_dependencies(&[]);
let amount = coins(40, "ETH");
let collateral = coins(1, "BTC");
let expires = 100_000;
let msg = InitMsg {
counter_offer: amount.clone(),
expires: expires,
};
let info = mock_info("creator", &collateral);
// we can just call .unwrap() to assert this was a success
let _ = init(deps.as_mut(), mock_env(), info, msg).unwrap();
// set new owner
let info = mock_info("creator", &[]);
let _ = handle_transfer(deps.as_mut(), mock_env(), info, HumanAddr::from("owner")).unwrap();
// random cannot execute
let info = mock_info("creator", &amount);
let err = handle_execute(deps.as_mut(), mock_env(), info).unwrap_err();
match err {
ContractError::Unauthorized {} => {}
e => panic!("unexpected error: {}", e),
}
// expired cannot execute
let info = mock_info("owner", &amount);
let mut env = mock_env();
env.block.height = 200_000;
let err = handle_execute(deps.as_mut(), env, info).unwrap_err();
match err {
ContractError::OptionExpired { expired } => assert_eq!(expired, expires),
e => panic!("unexpected error: {}", e),
}
// bad counter_offer cannot execute
let msg_offer = coins(39, "ETH");
let info = mock_info("owner", &msg_offer);
let err = handle_execute(deps.as_mut(), mock_env(), info).unwrap_err();
match err {
ContractError::CounterOfferMismatch {
offer,
counter_offer,
} => {
assert_eq!(msg_offer, offer);
assert_eq!(amount, counter_offer);
}
e => panic!("unexpected error: {}", e),
}
// proper execution
let info = mock_info("owner", &amount);
let res = handle_execute(deps.as_mut(), mock_env(), info).unwrap();
assert_eq!(res.messages.len(), 2);
assert_eq!(
res.messages[0],
CosmosMsg::Bank(BankMsg::Send {
from_address: MOCK_CONTRACT_ADDR.into(),
to_address: "creator".into(),
amount,
})
);
assert_eq!(
res.messages[1],
CosmosMsg::Bank(BankMsg::Send {
from_address: MOCK_CONTRACT_ADDR.into(),
to_address: "owner".into(),
amount: collateral,
})
);
// check deleted
let _ = query_config(deps.as_ref()).unwrap_err();
}
#[test]
fn burn() {
let mut deps = mock_dependencies(&[]);
let counter_offer = coins(40, "ETH");
let collateral = coins(1, "BTC");
let msg_expires = 100_000;
let msg = InitMsg {
counter_offer: counter_offer.clone(),
expires: msg_expires,
};
let info = mock_info("creator", &collateral);
// we can just call .unwrap() to assert this was a success
let _ = init(deps.as_mut(), mock_env(), info, msg).unwrap();
// set new owner
let info = mock_info("creator", &[]);
let _ = handle_transfer(deps.as_mut(), mock_env(), info, HumanAddr::from("owner")).unwrap();
// non-expired cannot execute
let info = mock_info("anyone", &[]);
let err = handle_burn(deps.as_mut(), mock_env(), info).unwrap_err();
match err {
ContractError::OptionNotExpired { expires } => assert_eq!(expires, msg_expires),
e => panic!("unexpected error: {}", e),
}
// with funds cannot execute
let info = mock_info("anyone", &counter_offer);
let mut env = mock_env();
env.block.height = 200_000;
let err = handle_burn(deps.as_mut(), env, info).unwrap_err();
match err {
ContractError::FundsSentWithBurn {} => {}
e => panic!("unexpected error: {}", e),
}
// expired returns funds
let info = mock_info("anyone", &[]);
let mut env = mock_env();
env.block.height = 200_000;
let res = handle_burn(deps.as_mut(), env, info).unwrap();
assert_eq!(res.messages.len(), 1);
assert_eq!(
res.messages[0],
CosmosMsg::Bank(BankMsg::Send {
from_address: MOCK_CONTRACT_ADDR.into(),
to_address: "creator".into(),
amount: collateral,
})
);
// check deleted
let _ = query_config(deps.as_ref()).unwrap_err();
}
}
| 30.936464 | 100 | 0.57139 |
6a19af7ea939ae2a97d4915b203fe4a10c9acf49 | 19,918 | extern crate proc_macro;
use proc_macro::TokenStream;
use quote::{quote, quote_spanned};
use std::collections::{HashMap, HashSet};
use syn::{
parenthesized,
parse::{Parse, ParseStream, Result},
parse_macro_input,
punctuated::Punctuated,
spanned::Spanned,
Error, Fields, Ident, Token, Type, Variant, Visibility,
};
/// Parses a DSL for defining finite state machines, and produces code implementing the
/// [StateMachine](trait.StateMachine.html) trait.
///
/// An example state machine definition of a card reader for unlocking a door:
/// ```
/// # extern crate state_machine_trait as rustfsm;
/// use state_machine_procmacro::fsm;
/// use std::convert::Infallible;
/// use state_machine_trait::{StateMachine, TransitionResult};
///
/// fsm! {
/// name CardReader; command Commands; error Infallible; shared_state SharedState;
///
/// Locked --(CardReadable(CardData), shared on_card_readable) --> ReadingCard;
/// Locked --(CardReadable(CardData), shared on_card_readable) --> Locked;
/// ReadingCard --(CardAccepted, on_card_accepted) --> DoorOpen;
/// ReadingCard --(CardRejected, on_card_rejected) --> Locked;
/// DoorOpen --(DoorClosed, on_door_closed) --> Locked;
/// }
///
/// #[derive(Clone)]
/// pub struct SharedState {
/// last_id: Option<String>
/// }
///
/// #[derive(Debug, Clone, Eq, PartialEq, Hash)]
/// pub enum Commands {
/// StartBlinkingLight,
/// StopBlinkingLight,
/// ProcessData(CardData),
/// }
///
/// type CardData = String;
///
/// /// Door is locked / idle / we are ready to read
/// #[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
/// pub struct Locked {}
///
/// /// Actively reading the card
/// #[derive(Debug, Clone, Eq, PartialEq, Hash)]
/// pub struct ReadingCard {
/// card_data: CardData,
/// }
///
/// /// The door is open, we shouldn't be accepting cards and should be blinking the light
/// #[derive(Debug, Clone, Eq, PartialEq, Hash)]
/// pub struct DoorOpen {}
/// impl DoorOpen {
/// fn on_door_closed(&self) -> CardReaderTransition {
/// TransitionResult::ok(vec![], Locked {})
/// }
/// }
///
/// impl Locked {
/// fn on_card_readable(&self, shared_dat: SharedState, data: CardData) -> CardReaderTransition {
/// match shared_dat.last_id {
/// // Arbitrarily deny the same person entering twice in a row
/// Some(d) if d == data => TransitionResult::default::<Locked>(),
/// _ => {
/// // Otherwise issue a processing command. This illustrates using the same handler
/// // for different destinations
/// TransitionResult::ok_shared(
/// vec![
/// Commands::ProcessData(data.clone()),
/// Commands::StartBlinkingLight,
/// ],
/// ReadingCard { card_data: data.clone() },
/// SharedState { last_id: Some(data) }
/// )
/// }
/// }
/// }
/// }
///
/// impl ReadingCard {
/// fn on_card_accepted(&self) -> CardReaderTransition {
/// TransitionResult::ok(vec![Commands::StopBlinkingLight], DoorOpen {})
/// }
/// fn on_card_rejected(&self) -> CardReaderTransition {
/// TransitionResult::ok(vec![Commands::StopBlinkingLight], Locked {})
/// }
/// }
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let crs = CardReaderState::Locked(Locked {});
/// let mut cr = CardReader { state: crs, shared_state: SharedState { last_id: None } };
/// let cmds = cr.on_event_mut(CardReaderEvents::CardReadable("badguy".to_string()))?;
/// assert_eq!(cmds[0], Commands::ProcessData("badguy".to_string()));
/// assert_eq!(cmds[1], Commands::StartBlinkingLight);
///
/// let cmds = cr.on_event_mut(CardReaderEvents::CardRejected)?;
/// assert_eq!(cmds[0], Commands::StopBlinkingLight);
///
/// let cmds = cr.on_event_mut(CardReaderEvents::CardReadable("goodguy".to_string()))?;
/// assert_eq!(cmds[0], Commands::ProcessData("goodguy".to_string()));
/// assert_eq!(cmds[1], Commands::StartBlinkingLight);
///
/// let cmds = cr.on_event_mut(CardReaderEvents::CardAccepted)?;
/// assert_eq!(cmds[0], Commands::StopBlinkingLight);
/// # Ok(())
/// # }
/// ```
///None
/// In the above example the first word is the name of the state machine, then after the comma the
/// type (which you must define separately) of commands produced by the machine.
///
/// then each line represents a transition, where the first word is the initial state, the tuple
/// inside the arrow is `(eventtype[, event handler])`, and the word after the arrow is the
/// destination state. here `eventtype` is an enum variant , and `event_handler` is a function you
/// must define outside the enum whose form depends on the event variant. the only variant types
/// allowed are unit and one-item tuple variants. For unit variants, the function takes no
/// parameters. For the tuple variants, the function takes the variant data as its parameter. In
/// either case the function is expected to return a `TransitionResult` to the appropriate state.
///
/// The first transition can be interpreted as "If the machine is in the locked state, when a
/// `CardReadable` event is seen, call `on_card_readable` (pasing in `CardData`) and transition to
/// the `ReadingCard` state.
///
/// The macro will generate a few things:
/// * A struct for the overall state machine, named with the provided name. Here:
/// ```ignore
/// struct CardMachine {
/// state: CardMachineState,
/// shared_state: CardId,
/// }
/// ```
/// * An enum with a variant for each state, named with the provided name + "State".
/// ```ignore
/// enum CardMachineState {
/// Locked(Locked),
/// ReadingCard(ReadingCard),
/// Unlocked(Unlocked),
/// }
/// ```
///
/// You are expected to define a type for each state, to contain that state's data. If there is
/// no data, you can simply: `type StateName = ()`
/// * An enum with a variant for each event. You are expected to define the type (if any) contained
/// in the event variant.
/// ```ignore
/// enum CardMachineEvents {
/// CardReadable(CardData)
/// }
/// ```
/// * An implementation of the [StateMachine](trait.StateMachine.html) trait for the generated state
/// machine enum (in this case, `CardMachine`)
/// * A type alias for a [TransitionResult](enum.TransitionResult.html) with the appropriate generic
/// parameters set for your machine. It is named as your machine with `Transition` appended. In
/// this case, `CardMachineTransition`.
#[proc_macro]
pub fn fsm(input: TokenStream) -> TokenStream {
let def: StateMachineDefinition = parse_macro_input!(input as StateMachineDefinition);
def.codegen()
}
mod kw {
syn::custom_keyword!(name);
syn::custom_keyword!(command);
syn::custom_keyword!(error);
syn::custom_keyword!(shared);
syn::custom_keyword!(shared_state);
}
struct StateMachineDefinition {
visibility: Visibility,
name: Ident,
shared_state_type: Option<Type>,
command_type: Ident,
error_type: Ident,
transitions: HashSet<Transition>,
}
impl StateMachineDefinition {
fn is_final_state(&self, state: &Ident) -> bool {
// If no transitions go from this state, it's a final state.
self.transitions.iter().find(|t| t.from == *state).is_none()
}
}
impl Parse for StateMachineDefinition {
fn parse(input: ParseStream) -> Result<Self> {
// Parse visibility if present
let visibility = input.parse()?;
// parse the state machine name, command type, and error type
let (name, command_type, error_type, shared_state_type) = parse_machine_types(&input).map_err(|mut e| {
e.combine(Error::new(
e.span(),
"The fsm definition should begin with `name MachineName; command CommandType; error ErrorType;` optionally followed by `shared_state SharedStateType;`",
));
e
})?;
// Then the state machine definition is simply a sequence of transitions separated by
// semicolons
let transitions: Punctuated<Transition, Token![;]> =
input.parse_terminated(Transition::parse)?;
let transitions = transitions.into_iter().collect();
Ok(Self {
visibility,
name,
shared_state_type,
transitions,
command_type,
error_type,
})
}
}
fn parse_machine_types(input: &ParseStream) -> Result<(Ident, Ident, Ident, Option<Type>)> {
let _: kw::name = input.parse()?;
let name: Ident = input.parse()?;
input.parse::<Token![;]>()?;
let _: kw::command = input.parse()?;
let command_type: Ident = input.parse()?;
input.parse::<Token![;]>()?;
let _: kw::error = input.parse()?;
let error_type: Ident = input.parse()?;
input.parse::<Token![;]>()?;
let shared_state_type: Option<Type> = if input.peek(kw::shared_state) {
let _: kw::shared_state = input.parse()?;
let typep = input.parse()?;
input.parse::<Token![;]>()?;
Some(typep)
} else {
None
};
Ok((name, command_type, error_type, shared_state_type))
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
struct Transition {
from: Ident,
to: Ident,
event: Variant,
handler: Option<Ident>,
mutates_shared: bool,
}
impl Parse for Transition {
fn parse(input: ParseStream) -> Result<Self> {
// TODO: Currently the handlers are not required to transition to the state they claimed
// they would. It would be great to find a way to fix that.
// Parse the initial state name
let from: Ident = input.parse()?;
// Parse at least one dash
input.parse::<Token![-]>()?;
while input.peek(Token![-]) {
input.parse::<Token![-]>()?;
}
// Parse transition information inside parens
let transition_info;
parenthesized!(transition_info in input);
// Get the event variant definition
let event: Variant = transition_info.parse()?;
// Reject non-unit or single-item-tuple variants
match &event.fields {
Fields::Named(_) => {
return Err(Error::new(
event.span(),
"Struct variants are not supported for events",
))
}
Fields::Unnamed(uf) => {
if uf.unnamed.len() != 1 {
return Err(Error::new(
event.span(),
"Only tuple variants with exactly one item are supported for events",
));
}
}
Fields::Unit => {}
}
// Check if there is an event handler, and parse it
let (mutates_shared, handler) = if transition_info.peek(Token![,]) {
transition_info.parse::<Token![,]>()?;
// Check for mut keyword signifying handler wants to mutate shared state
let mutates = if transition_info.peek(kw::shared) {
transition_info.parse::<kw::shared>()?;
true
} else {
false
};
(mutates, Some(transition_info.parse()?))
} else {
(false, None)
};
// Parse at least one dash followed by the "arrow"
input.parse::<Token![-]>()?;
while input.peek(Token![-]) {
input.parse::<Token![-]>()?;
}
input.parse::<Token![>]>()?;
// Parse the destination state
let to: Ident = input.parse()?;
Ok(Self {
from,
event,
handler,
to,
mutates_shared,
})
}
}
impl StateMachineDefinition {
fn codegen(&self) -> TokenStream {
let visibility = self.visibility.clone();
// First extract all of the states into a set, and build the enum's insides
let states: HashSet<_> = self
.transitions
.iter()
.flat_map(|t| vec![t.from.clone(), t.to.clone()])
.collect();
let state_variants = states.iter().map(|s| {
let statestr = s.to_string();
quote! {
#[display(fmt=#statestr)]
#s(#s)
}
});
let name = &self.name;
let name_str = &self.name.to_string();
let state_enum_name = Ident::new(&format!("{}State", name), name.span());
// If user has not defined any shared state, use the unit type.
let shared_state_type = self
.shared_state_type
.clone()
.unwrap_or_else(|| syn::parse_str("()").unwrap());
let machine_struct = quote! {
#[derive(Clone)]
#visibility struct #name {
state: #state_enum_name,
shared_state: #shared_state_type
}
};
let states_enum = quote! {
#[derive(::derive_more::From, Clone, ::derive_more::Display)]
#visibility enum #state_enum_name {
#(#state_variants),*
}
};
let state_is_final_match_arms = states.iter().map(|s| {
let val = if self.is_final_state(s) {
quote! { true }
} else {
quote! { false }
};
quote! { #state_enum_name::#s(_) => #val }
});
let states_enum_impl = quote! {
impl #state_enum_name {
fn is_final(&self) -> bool {
match self {
#(#state_is_final_match_arms),*
}
}
}
};
// Build the events enum
let events: HashSet<Variant> = self.transitions.iter().map(|t| t.event.clone()).collect();
let events_enum_name = Ident::new(&format!("{}Events", name), name.span());
let events: Vec<_> = events.into_iter().collect();
let events_enum = quote! {
#visibility enum #events_enum_name {
#(#events),*
}
};
// Construct the trait implementation
let cmd_type = &self.command_type;
let err_type = &self.error_type;
let mut statemap: HashMap<Ident, Vec<Transition>> = HashMap::new();
for t in &self.transitions {
statemap
.entry(t.from.clone())
.and_modify(|v| v.push(t.clone()))
.or_insert_with(|| vec![t.clone()]);
}
// Add any states without any transitions to the map
for s in &states {
if !statemap.contains_key(s) {
statemap.insert(s.clone(), vec![]);
}
}
let state_branches = statemap.iter().map(|(from, transitions)| {
let event_branches = transitions
.iter()
.map(|ts| {
let ev_variant = &ts.event.ident;
if let Some(ts_fn) = ts.handler.clone() {
let span = ts_fn.span();
match ts.event.fields {
Fields::Unnamed(_) => {
let arglist = if ts.mutates_shared {
quote! {self.shared_state, val}
} else {
quote! {val}
};
quote_spanned! {span=>
#events_enum_name::#ev_variant(val) => {
state_data.#ts_fn(#arglist)
}
}
}
Fields::Unit => {
let arglist = if ts.mutates_shared {
quote! {self.shared_state}
} else {
quote! {}
};
quote_spanned! {span=>
#events_enum_name::#ev_variant => {
state_data.#ts_fn(#arglist)
}
}
}
Fields::Named(_) => unreachable!(),
}
} else {
// If events do not have a handler, attempt to construct the next state
// using `Default`.
let new_state = ts.to.clone();
let span = new_state.span();
let default_trans = quote_spanned! {span=>
TransitionResult::from::<#from, #new_state>(state_data)
};
let span = ts.event.span();
match ts.event.fields {
Fields::Unnamed(_) => quote_spanned! {span=>
#events_enum_name::#ev_variant(_val) => {
#default_trans
}
},
Fields::Unit => quote_spanned! {span=>
#events_enum_name::#ev_variant => {
#default_trans
}
},
Fields::Named(_) => unreachable!(),
}
}
})
// Since most states won't handle every possible event, return an error to that effect
.chain(std::iter::once(
quote! { _ => { return TransitionResult::InvalidTransition } },
));
quote! {
#state_enum_name::#from(state_data) => match event {
#(#event_branches),*
}
}
});
let trait_impl = quote! {
impl ::rustfsm::StateMachine for #name {
type Error = #err_type;
type State = #state_enum_name;
type SharedState = #shared_state_type;
type Event = #events_enum_name;
type Command = #cmd_type;
fn name(&self) -> &str {
#name_str
}
fn on_event(self, event: #events_enum_name)
-> ::rustfsm::TransitionResult<Self> {
match self.state {
#(#state_branches),*
}
}
fn state(&self) -> &Self::State {
&self.state
}
fn set_state(&mut self, new: Self::State) {
self.state = new
}
fn shared_state(&self) -> &Self::SharedState{
&self.shared_state
}
fn on_final_state(&self) -> bool {
self.state.is_final()
}
fn from_parts(shared: Self::SharedState, state: Self::State) -> Self {
Self { shared_state: shared, state }
}
}
};
let transition_result_name = Ident::new(&format!("{}Transition", name), name.span());
let transition_type_alias = quote! {
type #transition_result_name = TransitionResult<#name>;
};
let output = quote! {
#transition_type_alias
#machine_struct
#states_enum
#states_enum_impl
#events_enum
#trait_impl
};
output.into()
}
}
| 37.652174 | 168 | 0.524902 |
0e6e2fd3675f6325b6f905d26f826dd1b32e97d3 | 5,396 | use crate::sql_database_migration_steps_inferrer::wrap_as_step;
use crate::sql_migration_step::*;
use database_inspector::{Column, DatabaseSchema, Table};
pub struct DatabaseSchemaDiffer {
previous: DatabaseSchema,
next: DatabaseSchema,
}
impl DatabaseSchemaDiffer {
pub fn diff(previous: DatabaseSchema, next: DatabaseSchema) -> Vec<SqlMigrationStep> {
let differ = DatabaseSchemaDiffer { previous, next };
differ.diff_internal()
}
fn diff_internal(&self) -> Vec<SqlMigrationStep> {
let mut result = Vec::new();
result.append(&mut wrap_as_step(self.create_tables(), |x| {
SqlMigrationStep::CreateTable(x)
}));
result.append(&mut wrap_as_step(self.drop_tables(), |x| {
SqlMigrationStep::DropTable(x)
}));
result.append(&mut wrap_as_step(self.alter_tables(), |x| {
SqlMigrationStep::AlterTable(x)
}));
result
}
fn create_tables(&self) -> Vec<CreateTable> {
let mut result = Vec::new();
for next_table in &self.next.tables {
if !self.previous.has_table(&next_table.name) {
let primary_columns = next_table
.indexes
.iter()
.find(|i| i.unique)
.map(|i| i.columns.clone())
.unwrap_or(Vec::new());
let create = CreateTable {
name: next_table.name.clone(),
columns: Self::column_descriptions(&next_table.columns),
primary_columns: primary_columns,
};
result.push(create);
}
}
result
}
fn drop_tables(&self) -> Vec<DropTable> {
let mut result = Vec::new();
for previous_table in &self.previous.tables {
if !self.next.has_table(&previous_table.name) && previous_table.name != "_Migration" {
let drop = DropTable {
name: previous_table.name.clone(),
};
result.push(drop);
}
}
result
}
fn alter_tables(&self) -> Vec<AlterTable> {
let mut result = Vec::new();
for previous_table in &self.previous.tables {
if let Some(next_table) = self.next.table(&previous_table.name) {
let mut changes = Vec::new();
changes.append(&mut Self::drop_columns(&previous_table, &next_table));
changes.append(&mut Self::add_columns(&previous_table, &next_table));
changes.append(&mut Self::alter_columns(&previous_table, &next_table));
if !changes.is_empty() {
let update = AlterTable {
table: previous_table.name.clone(),
changes: changes,
};
result.push(update);
}
}
}
result
}
fn drop_columns(previous: &Table, next: &Table) -> Vec<TableChange> {
let mut result = Vec::new();
for previous_column in &previous.columns {
if !next.has_column(&previous_column.name) {
let change = DropColumn {
name: previous_column.name.clone(),
};
result.push(TableChange::DropColumn(change));
}
}
result
}
fn add_columns(previous: &Table, next: &Table) -> Vec<TableChange> {
let mut result = Vec::new();
for next_column in &next.columns {
if !previous.has_column(&next_column.name) {
let change = AddColumn {
column: Self::column_description(next_column),
};
result.push(TableChange::AddColumn(change));
}
}
result
}
fn alter_columns(previous: &Table, next: &Table) -> Vec<TableChange> {
let mut result = Vec::new();
for next_column in &next.columns {
if let Some(previous_column) = previous.column(&next_column.name) {
if previous_column != next_column {
let change = AlterColumn {
name: previous_column.name.clone(),
column: Self::column_description(next_column),
};
result.push(TableChange::AlterColumn(change));
}
}
}
result
}
fn column_descriptions(columns: &Vec<Column>) -> Vec<ColumnDescription> {
columns.iter().map(Self::column_description).collect()
}
fn column_description(column: &Column) -> ColumnDescription {
ColumnDescription {
name: column.name.clone(),
tpe: Self::convert_column_type(column.tpe),
required: column.is_required,
}
}
fn convert_column_type(inspector_type: database_inspector::ColumnType) -> ColumnType {
match inspector_type {
database_inspector::ColumnType::Boolean => ColumnType::Boolean,
database_inspector::ColumnType::Int => ColumnType::Int,
database_inspector::ColumnType::Float => ColumnType::Float,
database_inspector::ColumnType::String => ColumnType::String,
database_inspector::ColumnType::DateTime => ColumnType::DateTime,
}
}
}
| 35.973333 | 98 | 0.546887 |
9181f6cf1de5c2925a4ddee72f9b00fe3167304a | 24,850 | // Copyright 2015 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use base::CGFloat;
use color_space::CGColorSpace;
use core_foundation::base::{CFTypeID, TCFType};
use font::{CGFont, CGGlyph};
use geometry::{CGPoint, CGSize};
use gradient::{CGGradient, CGGradientDrawingOptions};
use color::CGColor;
use path::CGPathRef;
use libc::{c_int, size_t};
use std::os::raw::c_void;
use std::cmp;
use std::ptr;
use std::slice;
use geometry::{CGAffineTransform, CGRect};
use image::CGImage;
use foreign_types::{ForeignType, ForeignTypeRef};
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub enum CGBlendMode {
Normal = 0,
Multiply,
Screen,
Overlay,
Darken,
Lighten,
ColorDodge,
ColorBurn,
SoftLight,
HardLight,
Difference,
Exclusion,
Hue,
Saturation,
Color,
Luminosity,
// 10.5 and up:
Clear,
Copy,
SourceIn,
SourceOut,
SourceAtop,
DestinationOver,
DestinationIn,
DestinationOut,
DestinationAtop,
Xor,
PlusDarker,
PlusLighter,
}
#[repr(C)]
pub enum CGTextDrawingMode {
CGTextFill,
CGTextStroke,
CGTextFillStroke,
CGTextInvisible,
CGTextFillClip,
CGTextStrokeClip,
CGTextFillStrokeClip,
CGTextClip
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub enum CGLineCap {
CGLineCapButt,
CGLineCapRound,
CGLineCapSquare,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub enum CGLineJoin {
CGLineJoinMiter,
CGLineJoinRound,
CGLineJoinBevel,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub enum CGPathDrawingMode {
CGPathFill,
CGPathEOFill,
CGPathStroke,
CGPathFillStroke,
CGPathEOFillStroke,
}
foreign_type! {
#[doc(hidden)]
type CType = ::sys::CGContext;
fn drop = |cs| CGContextRelease(cs);
fn clone = |p| CGContextRetain(p);
pub struct CGContext;
pub struct CGContextRef;
}
impl CGContext {
pub fn type_id() -> CFTypeID {
unsafe {
CGContextGetTypeID()
}
}
/// Creates a `CGContext` instance from an existing [`CGContextRef`] pointer.
///
/// This funtion will internally call [`CGRetain`] and hence there is no need to call it explicitly.
///
/// This function is particularly useful for cases when the context is not instantiated/managed
/// by the caller, but it's retrieve via other means (e.g., by calling the method [`NSGraphicsContext::CGContext`]
/// in a cocoa application).
///
/// [`CGContextRef`]: https://developer.apple.com/documentation/coregraphics/cgcontextref
/// [`CGRetain`]: https://developer.apple.com/documentation/coregraphics/1586506-cgcontextretain
/// [`NSGraphicsContext::CGContext`]: https://developer.apple.com/documentation/appkit/nsgraphicscontext/1535352-currentcontext
pub unsafe fn from_existing_context_ptr(ctx: *mut ::sys::CGContext) -> CGContext {
CGContextRetain(ctx);
Self::from_ptr(ctx)
}
pub fn create_bitmap_context(data: Option<*mut c_void>,
width: size_t,
height: size_t,
bits_per_component: size_t,
bytes_per_row: size_t,
space: &CGColorSpace,
bitmap_info: u32)
-> CGContext {
unsafe {
let result = CGBitmapContextCreate(data.unwrap_or(ptr::null_mut()),
width,
height,
bits_per_component,
bytes_per_row,
space.as_ptr(),
bitmap_info);
assert!(!result.is_null());
Self::from_ptr(result)
}
}
pub fn data(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(
CGBitmapContextGetData(self.as_ptr()) as *mut u8,
(self.height() * self.bytes_per_row()) as usize)
}
}
}
impl CGContextRef {
pub fn flush(&self) {
unsafe {
CGContextFlush(self.as_ptr())
}
}
pub fn width(&self) -> size_t {
unsafe {
CGBitmapContextGetWidth(self.as_ptr())
}
}
pub fn height(&self) -> size_t {
unsafe {
CGBitmapContextGetHeight(self.as_ptr())
}
}
pub fn bytes_per_row(&self) -> size_t {
unsafe {
CGBitmapContextGetBytesPerRow(self.as_ptr())
}
}
pub fn clip_bounding_box(&self) -> CGRect {
unsafe {
CGContextGetClipBoundingBox(self.as_ptr())
}
}
pub fn set_fill_color(&self, color: &CGColor) {
unsafe {
CGContextSetFillColorWithColor(self.as_ptr(), color.as_concrete_TypeRef());
}
}
pub fn set_rgb_fill_color(&self, red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat) {
unsafe {
CGContextSetRGBFillColor(self.as_ptr(), red, green, blue, alpha)
}
}
pub fn set_rgb_stroke_color(&self, red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat) {
unsafe {
CGContextSetRGBStrokeColor(self.as_ptr(), red, green, blue, alpha)
}
}
pub fn set_gray_fill_color(&self, gray: CGFloat, alpha: CGFloat) {
unsafe {
CGContextSetGrayFillColor(self.as_ptr(), gray, alpha)
}
}
pub fn set_blend_mode(&self, blend_mode: CGBlendMode) {
unsafe {
CGContextSetBlendMode(self.as_ptr(), blend_mode)
}
}
pub fn set_allows_font_smoothing(&self, allows_font_smoothing: bool) {
unsafe {
CGContextSetAllowsFontSmoothing(self.as_ptr(), allows_font_smoothing)
}
}
pub fn set_font_smoothing_style(&self, style: i32) {
unsafe {
CGContextSetFontSmoothingStyle(self.as_ptr(), style as _);
}
}
pub fn set_should_smooth_fonts(&self, should_smooth_fonts: bool) {
unsafe {
CGContextSetShouldSmoothFonts(self.as_ptr(), should_smooth_fonts)
}
}
pub fn set_allows_antialiasing(&self, allows_antialiasing: bool) {
unsafe {
CGContextSetAllowsAntialiasing(self.as_ptr(), allows_antialiasing)
}
}
pub fn set_should_antialias(&self, should_antialias: bool) {
unsafe {
CGContextSetShouldAntialias(self.as_ptr(), should_antialias)
}
}
pub fn set_allows_font_subpixel_quantization(&self, allows_font_subpixel_quantization: bool) {
unsafe {
CGContextSetAllowsFontSubpixelQuantization(self.as_ptr(), allows_font_subpixel_quantization)
}
}
pub fn set_should_subpixel_quantize_fonts(&self, should_subpixel_quantize_fonts: bool) {
unsafe {
CGContextSetShouldSubpixelQuantizeFonts(self.as_ptr(), should_subpixel_quantize_fonts)
}
}
pub fn set_allows_font_subpixel_positioning(&self, allows_font_subpixel_positioning: bool) {
unsafe {
CGContextSetAllowsFontSubpixelPositioning(self.as_ptr(), allows_font_subpixel_positioning)
}
}
pub fn set_should_subpixel_position_fonts(&self, should_subpixel_position_fonts: bool) {
unsafe {
CGContextSetShouldSubpixelPositionFonts(self.as_ptr(), should_subpixel_position_fonts)
}
}
pub fn set_text_drawing_mode(&self, mode: CGTextDrawingMode) {
unsafe {
CGContextSetTextDrawingMode(self.as_ptr(), mode)
}
}
pub fn set_line_cap(&self, cap: CGLineCap) {
unsafe {
CGContextSetLineCap(self.as_ptr(), cap)
}
}
pub fn set_line_dash(&self, phase: CGFloat, lengths: &[CGFloat]) {
unsafe {
CGContextSetLineDash(self.as_ptr(), phase, lengths.as_ptr(), lengths.len())
}
}
pub fn set_line_join(&self, join: CGLineJoin) {
unsafe {
CGContextSetLineJoin(self.as_ptr(), join)
}
}
pub fn set_line_width(&self, width: CGFloat) {
unsafe {
CGContextSetLineWidth(self.as_ptr(), width)
}
}
pub fn set_miter_limit(&self, limit: CGFloat) {
unsafe {
CGContextSetMiterLimit(self.as_ptr(), limit)
}
}
pub fn add_path(&self, path: &CGPathRef) {
unsafe {
CGContextAddPath(self.as_ptr(), path.as_ptr());
}
}
pub fn add_curve_to_point(&self,
cp1x: CGFloat,
cp1y: CGFloat,
cp2x: CGFloat,
cp2y: CGFloat,
x: CGFloat,
y: CGFloat) {
unsafe {
CGContextAddCurveToPoint(self.as_ptr(),
cp1x, cp1y,
cp2x, cp2y,
x, y);
}
}
pub fn add_quad_curve_to_point(&self,
cpx: CGFloat,
cpy: CGFloat,
x: CGFloat,
y: CGFloat) {
unsafe {
CGContextAddQuadCurveToPoint(self.as_ptr(),
cpx, cpy,
x, y);
}
}
pub fn add_line_to_point(&self, x: CGFloat, y: CGFloat) {
unsafe {
CGContextAddLineToPoint(self.as_ptr(), x, y);
}
}
pub fn begin_path(&self) {
unsafe {
CGContextBeginPath(self.as_ptr());
}
}
pub fn close_path(&self) {
unsafe {
CGContextClosePath(self.as_ptr());
}
}
pub fn move_to_point(&self, x: CGFloat, y: CGFloat) {
unsafe {
CGContextMoveToPoint(self.as_ptr(), x, y);
}
}
pub fn clip(&self) {
unsafe {
CGContextClip(self.as_ptr());
}
}
pub fn eo_clip(&self) {
unsafe {
CGContextEOClip(self.as_ptr());
}
}
pub fn draw_path(&self, mode: CGPathDrawingMode) {
unsafe {
CGContextDrawPath(self.as_ptr(), mode);
}
}
pub fn fill_path(&self) {
unsafe {
CGContextFillPath(self.as_ptr());
}
}
pub fn eo_fill_path(&self) {
unsafe {
CGContextEOFillPath(self.as_ptr());
}
}
pub fn stroke_path(&self) {
unsafe {
CGContextStrokePath(self.as_ptr());
}
}
pub fn fill_rect(&self, rect: CGRect) {
unsafe {
CGContextFillRect(self.as_ptr(), rect)
}
}
pub fn fill_rects(&self, rects: &[CGRect]) {
unsafe {
CGContextFillRects(self.as_ptr(), rects.as_ptr(), rects.len())
}
}
pub fn clear_rect(&self, rect: CGRect) {
unsafe {
CGContextClearRect(self.as_ptr(), rect)
}
}
pub fn stroke_rect(&self, rect: CGRect) {
unsafe {
CGContextStrokeRect(self.as_ptr(), rect)
}
}
pub fn stroke_rect_with_width(&self, rect: CGRect, width: CGFloat) {
unsafe {
CGContextStrokeRectWithWidth(self.as_ptr(), rect, width)
}
}
pub fn clip_to_rect(&self, rect: CGRect) {
unsafe {
CGContextClipToRect(self.as_ptr(), rect)
}
}
pub fn clip_to_rects(&self, rects: &[CGRect]) {
unsafe {
CGContextClipToRects(self.as_ptr(), rects.as_ptr(), rects.len())
}
}
pub fn replace_path_with_stroked_path(&self) {
unsafe {
CGContextReplacePathWithStrokedPath(self.as_ptr())
}
}
pub fn fill_ellipse_in_rect(&self, rect: CGRect) {
unsafe {
CGContextFillEllipseInRect(self.as_ptr(), rect)
}
}
pub fn stroke_ellipse_in_rect(&self, rect: CGRect) {
unsafe {
CGContextStrokeEllipseInRect(self.as_ptr(), rect)
}
}
pub fn stroke_line_segments(&self, points: &[CGPoint]) {
unsafe {
CGContextStrokeLineSegments(self.as_ptr(), points.as_ptr(), points.len())
}
}
pub fn draw_image(&self, rect: CGRect, image: &CGImage) {
unsafe {
CGContextDrawImage(self.as_ptr(), rect, image.as_ptr());
}
}
pub fn create_image(&self) -> Option<CGImage> {
let image = unsafe { CGBitmapContextCreateImage(self.as_ptr()) };
if !image.is_null() {
Some(unsafe { CGImage::from_ptr(image) })
} else {
None
}
}
pub fn set_font(&self, font: &CGFont) {
unsafe {
CGContextSetFont(self.as_ptr(), font.as_ptr())
}
}
pub fn set_font_size(&self, size: CGFloat) {
unsafe {
CGContextSetFontSize(self.as_ptr(), size)
}
}
pub fn set_text_matrix(&self, t: &CGAffineTransform) {
unsafe {
CGContextSetTextMatrix(self.as_ptr(), *t)
}
}
pub fn set_text_position(&self, x: CGFloat, y: CGFloat) {
unsafe {
CGContextSetTextPosition(self.as_ptr(), x, y)
}
}
pub fn show_glyphs_at_positions(&self, glyphs: &[CGGlyph], positions: &[CGPoint]) {
unsafe {
let count = cmp::min(glyphs.len(), positions.len());
CGContextShowGlyphsAtPositions(self.as_ptr(),
glyphs.as_ptr(),
positions.as_ptr(),
count)
}
}
pub fn save(&self) {
unsafe {
CGContextSaveGState(self.as_ptr());
}
}
pub fn restore(&self) {
unsafe {
CGContextRestoreGState(self.as_ptr());
}
}
pub fn translate(&self, tx: CGFloat, ty: CGFloat) {
unsafe {
CGContextTranslateCTM(self.as_ptr(), tx, ty);
}
}
pub fn scale(&self, sx: CGFloat, sy: CGFloat) {
unsafe {
CGContextScaleCTM(self.as_ptr(), sx, sy);
}
}
pub fn rotate(&self, angle: CGFloat) {
unsafe {
CGContextRotateCTM(self.as_ptr(), angle);
}
}
pub fn get_ctm(&self) -> CGAffineTransform {
unsafe {
CGContextGetCTM(self.as_ptr())
}
}
pub fn concat_ctm(&self, transform: CGAffineTransform) {
unsafe {
CGContextConcatCTM(self.as_ptr(), transform)
}
}
pub fn draw_linear_gradient(&self, gradient: &CGGradient, start_point: CGPoint, end_point: CGPoint, options: CGGradientDrawingOptions) {
unsafe {
CGContextDrawLinearGradient(self.as_ptr(), gradient.as_ptr(), start_point, end_point, options);
}
}
pub fn draw_radial_gradient(&self, gradient: &CGGradient, start_center: CGPoint, start_radius: CGFloat, end_center: CGPoint, end_radius: CGFloat, options: CGGradientDrawingOptions) {
unsafe {
CGContextDrawRadialGradient(self.as_ptr(), gradient.as_ptr(), start_center, start_radius, end_center, end_radius, options);
}
}
pub fn set_shadow(&self, offset: CGSize, blur: CGFloat) {
unsafe {
CGContextSetShadow(self.as_ptr(), offset, blur);
}
}
pub fn set_shadow_with_color(&self, offset: CGSize, blur: CGFloat, color: &CGColor) {
unsafe {
CGContextSetShadowWithColor(self.as_ptr(), offset, blur, color.as_concrete_TypeRef());
}
}
}
#[test]
fn create_bitmap_context_test() {
use geometry::*;
let cs = CGColorSpace::create_device_rgb();
let ctx = CGContext::create_bitmap_context(None,
16, 8,
8, 0,
&cs,
::base::kCGImageAlphaPremultipliedLast);
ctx.set_rgb_fill_color(1.,0.,1.,1.);
ctx.set_miter_limit(4.);
ctx.fill_rect(CGRect::new(&CGPoint::new(0.,0.), &CGSize::new(8.,8.)));
let img = ctx.create_image().unwrap();
assert_eq!(16, img.width());
assert_eq!(8, img.height());
assert_eq!(8, img.bits_per_component());
assert_eq!(32, img.bits_per_pixel());
let data = img.data();
assert_eq!(255, data.bytes()[0]);
assert_eq!(0, data.bytes()[1]);
assert_eq!(255, data.bytes()[2]);
assert_eq!(255, data.bytes()[3]);
}
#[link(name = "CoreGraphics", kind = "framework")]
extern {
fn CGContextRetain(c: ::sys::CGContextRef) -> ::sys::CGContextRef;
fn CGContextRelease(c: ::sys::CGContextRef);
fn CGBitmapContextCreate(data: *mut c_void,
width: size_t,
height: size_t,
bitsPerComponent: size_t,
bytesPerRow: size_t,
space: ::sys::CGColorSpaceRef,
bitmapInfo: u32)
-> ::sys::CGContextRef;
fn CGBitmapContextGetData(context: ::sys::CGContextRef) -> *mut c_void;
fn CGBitmapContextGetWidth(context: ::sys::CGContextRef) -> size_t;
fn CGBitmapContextGetHeight(context: ::sys::CGContextRef) -> size_t;
fn CGBitmapContextGetBytesPerRow(context: ::sys::CGContextRef) -> size_t;
fn CGBitmapContextCreateImage(context: ::sys::CGContextRef) -> ::sys::CGImageRef;
fn CGContextGetTypeID() -> CFTypeID;
fn CGContextGetClipBoundingBox(c: ::sys::CGContextRef) -> CGRect;
fn CGContextFlush(c: ::sys::CGContextRef);
fn CGContextSetBlendMode(c: ::sys::CGContextRef, blendMode: CGBlendMode);
fn CGContextSetAllowsFontSmoothing(c: ::sys::CGContextRef, allowsFontSmoothing: bool);
fn CGContextSetShouldSmoothFonts(c: ::sys::CGContextRef, shouldSmoothFonts: bool);
fn CGContextSetFontSmoothingStyle(c: ::sys::CGContextRef, style: c_int);
fn CGContextSetAllowsAntialiasing(c: ::sys::CGContextRef, allowsAntialiasing: bool);
fn CGContextSetShouldAntialias(c: ::sys::CGContextRef, shouldAntialias: bool);
fn CGContextSetAllowsFontSubpixelQuantization(c: ::sys::CGContextRef,
allowsFontSubpixelQuantization: bool);
fn CGContextSetShouldSubpixelQuantizeFonts(c: ::sys::CGContextRef,
shouldSubpixelQuantizeFonts: bool);
fn CGContextSetAllowsFontSubpixelPositioning(c: ::sys::CGContextRef,
allowsFontSubpixelPositioning: bool);
fn CGContextSetShouldSubpixelPositionFonts(c: ::sys::CGContextRef,
shouldSubpixelPositionFonts: bool);
fn CGContextSetTextDrawingMode(c: ::sys::CGContextRef, mode: CGTextDrawingMode);
fn CGContextSetFillColorWithColor(c: ::sys::CGContextRef, color: ::sys::CGColorRef);
fn CGContextSetLineCap(c: ::sys::CGContextRef, cap: CGLineCap);
fn CGContextSetLineDash(c: ::sys::CGContextRef, phase: CGFloat, lengths: *const CGFloat, count: size_t);
fn CGContextSetLineJoin(c: ::sys::CGContextRef, join: CGLineJoin);
fn CGContextSetLineWidth(c: ::sys::CGContextRef, width: CGFloat);
fn CGContextSetMiterLimit(c: ::sys::CGContextRef, limit: CGFloat);
fn CGContextAddPath(c: ::sys::CGContextRef, path: ::sys::CGPathRef);
fn CGContextAddCurveToPoint(c: ::sys::CGContextRef,
cp1x: CGFloat,
cp1y: CGFloat,
cp2x: CGFloat,
cp2y: CGFloat,
x: CGFloat,
y: CGFloat);
fn CGContextAddQuadCurveToPoint(c: ::sys::CGContextRef,
cpx: CGFloat,
cpy: CGFloat,
x: CGFloat,
y: CGFloat);
fn CGContextAddLineToPoint(c: ::sys::CGContextRef,
x: CGFloat,
y: CGFloat);
fn CGContextBeginPath(c: ::sys::CGContextRef);
fn CGContextClosePath(c: ::sys::CGContextRef);
fn CGContextMoveToPoint(c: ::sys::CGContextRef,
x: CGFloat,
y: CGFloat);
fn CGContextDrawPath(c: ::sys::CGContextRef, mode: CGPathDrawingMode);
fn CGContextFillPath(c: ::sys::CGContextRef);
fn CGContextEOFillPath(c: ::sys::CGContextRef);
fn CGContextClip(c: ::sys::CGContextRef);
fn CGContextEOClip(c: ::sys::CGContextRef);
fn CGContextStrokePath(c: ::sys::CGContextRef);
fn CGContextSetRGBFillColor(context: ::sys::CGContextRef,
red: CGFloat,
green: CGFloat,
blue: CGFloat,
alpha: CGFloat);
fn CGContextSetRGBStrokeColor(context: ::sys::CGContextRef,
red: CGFloat,
green: CGFloat,
blue: CGFloat,
alpha: CGFloat);
fn CGContextSetGrayFillColor(context: ::sys::CGContextRef, gray: CGFloat, alpha: CGFloat);
fn CGContextClearRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextFillRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextFillRects(context: ::sys::CGContextRef,
rects: *const CGRect,
count: size_t);
fn CGContextStrokeRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextStrokeRectWithWidth(context: ::sys::CGContextRef,
rect: CGRect,
width: CGFloat);
fn CGContextClipToRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextClipToRects(context: ::sys::CGContextRef,
rects: *const CGRect,
count: size_t);
fn CGContextReplacePathWithStrokedPath(context: ::sys::CGContextRef);
fn CGContextFillEllipseInRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextStrokeEllipseInRect(context: ::sys::CGContextRef,
rect: CGRect);
fn CGContextStrokeLineSegments(context: ::sys::CGContextRef,
points: *const CGPoint,
count: size_t);
fn CGContextDrawImage(c: ::sys::CGContextRef, rect: CGRect, image: ::sys::CGImageRef);
fn CGContextSetFont(c: ::sys::CGContextRef, font: ::sys::CGFontRef);
fn CGContextSetFontSize(c: ::sys::CGContextRef, size: CGFloat);
fn CGContextSetTextMatrix(c: ::sys::CGContextRef, t: CGAffineTransform);
fn CGContextSetTextPosition(c: ::sys::CGContextRef, x: CGFloat, y: CGFloat);
fn CGContextShowGlyphsAtPositions(c: ::sys::CGContextRef,
glyphs: *const CGGlyph,
positions: *const CGPoint,
count: size_t);
fn CGContextSaveGState(c: ::sys::CGContextRef);
fn CGContextRestoreGState(c: ::sys::CGContextRef);
fn CGContextTranslateCTM(c: ::sys::CGContextRef, tx: CGFloat, ty: CGFloat);
fn CGContextScaleCTM(c: ::sys::CGContextRef, sx: CGFloat, sy: CGFloat);
fn CGContextRotateCTM(c: ::sys::CGContextRef, angle: CGFloat);
fn CGContextGetCTM(c: ::sys::CGContextRef) -> CGAffineTransform;
fn CGContextConcatCTM(c: ::sys::CGContextRef, transform: CGAffineTransform);
fn CGContextDrawLinearGradient(c: ::sys::CGContextRef, gradient: ::sys::CGGradientRef, startPoint: CGPoint, endPoint: CGPoint, options: CGGradientDrawingOptions);
fn CGContextDrawRadialGradient(c: ::sys::CGContextRef, gradient: ::sys::CGGradientRef, startCenter: CGPoint, startRadius: CGFloat, endCenter:CGPoint, endRadius:CGFloat, options: CGGradientDrawingOptions);
fn CGContextSetShadow(c: ::sys::CGContextRef, offset: CGSize, blur: CGFloat);
fn CGContextSetShadowWithColor(c: ::sys::CGContextRef, offset: CGSize, blur: CGFloat, color: ::sys::CGColorRef);
}
| 33.400538 | 209 | 0.56672 |
76235a1a3077a80f8ae71f5ee99ad16321ccbac1 | 4,271 | // this file is auto-generated by hap-codegen
use async_trait::async_trait;
use serde::Serialize;
use serde_json::json;
use crate::{
characteristic::{
AsyncCharacteristicCallbacks,
Characteristic,
CharacteristicCallbacks,
Format,
HapCharacteristic,
HapCharacteristicSetup,
HapType,
OnReadFn,
OnReadFuture,
OnUpdateFn,
OnUpdateFuture,
Perm,
Unit,
},
pointer,
Error,
Result,
};
// TODO - re-check MaximumDataLength
/// Current Relative Humidity characteristic.
#[derive(Debug, Default, Serialize)]
pub struct CurrentRelativeHumidityCharacteristic(Characteristic<f32>);
impl CurrentRelativeHumidityCharacteristic {
/// Creates a new Current Relative Humidity characteristic.
pub fn new(id: u64, accessory_id: u64) -> Self {
#[allow(unused_mut)]
let mut c = Self(Characteristic::<f32> {
id,
accessory_id,
hap_type: HapType::CurrentRelativeHumidity,
format: Format::Float,
perms: vec![
Perm::Events,
Perm::PairedRead,
],
unit: Some(Unit::Percentage),
max_value: Some(100 as f32),
min_value: Some(0 as f32),
step_value: Some(1 as f32),
..Default::default()
});
if let Some(ref min_value) = &c.0.min_value {
c.0.value = min_value.clone();
} else if let Some(ref valid_values) = &c.0.valid_values {
if valid_values.len() > 0 {
c.0.value = valid_values[0].clone();
}
}
c
}
}
#[async_trait]
impl HapCharacteristic for CurrentRelativeHumidityCharacteristic {
fn get_id(&self) -> u64 { self.0.get_id() }
fn get_type(&self) -> HapType { self.0.get_type() }
fn get_format(&self) -> Format { self.0.get_format() }
fn get_perms(&self) -> Vec<Perm> { self.0.get_perms() }
fn get_event_notifications(&self) -> Option<bool> { self.0.get_event_notifications() }
fn set_event_notifications(&mut self, event_notifications: Option<bool>) {
self.0.set_event_notifications(event_notifications)
}
async fn get_value(&mut self) -> Result<serde_json::Value> {
let value = self.0.get_value().await?;
Ok(json!(value))
}
async fn set_value(&mut self, value: serde_json::Value) -> Result<()> {
let v;
// for whatever reason, the controller is setting boolean values either as a boolean or as an integer
if self.0.format == Format::Bool && value.is_number() {
let num_v: u8 = serde_json::from_value(value)?;
if num_v == 0 {
v = serde_json::from_value(json!(false))?;
} else if num_v == 1 {
v = serde_json::from_value(json!(true))?;
} else {
return Err(Error::InvalidValue(self.get_format()));
}
} else {
v = serde_json::from_value(value).map_err(|_| Error::InvalidValue(self.get_format()))?;
}
self.0.set_value(v).await
}
fn get_unit(&self) -> Option<Unit> { self.0.get_unit() }
fn get_max_value(&self) -> Option<serde_json::Value> { self.0.get_max_value().map(|v| json!(v)) }
fn get_min_value(&self) -> Option<serde_json::Value> { self.0.get_min_value().map(|v| json!(v)) }
fn get_step_value(&self) -> Option<serde_json::Value> { self.0.get_step_value().map(|v| json!(v)) }
fn get_max_len(&self) -> Option<u16> { self.0.get_max_len() }
}
impl HapCharacteristicSetup for CurrentRelativeHumidityCharacteristic {
fn set_event_emitter(&mut self, event_emitter: Option<pointer::EventEmitter>) {
self.0.set_event_emitter(event_emitter)
}
}
impl CharacteristicCallbacks<f32> for CurrentRelativeHumidityCharacteristic {
fn on_read(&mut self, f: Option<impl OnReadFn<f32>>) { self.0.on_read(f) }
fn on_update(&mut self, f: Option<impl OnUpdateFn<f32>>) { self.0.on_update(f) }
}
impl AsyncCharacteristicCallbacks<f32> for CurrentRelativeHumidityCharacteristic {
fn on_read_async(&mut self, f: Option<impl OnReadFuture<f32>>) { self.0.on_read_async(f) }
fn on_update_async(&mut self, f: Option<impl OnUpdateFuture<f32>>) { self.0.on_update_async(f) }
}
| 32.356061 | 109 | 0.627019 |
8f1c975a83893318640a6c1a0745c05838ff92b2 | 17 | pub mod witness;
| 8.5 | 16 | 0.764706 |
eb22e0b6a5a9691690dff08b50acc0f8388b2234 | 1,255 | use jormungandr_lib::testing::{Speed, SpeedBenchmarkDef, SpeedBenchmarkFinish, Timestamp};
use std::time::Duration;
use crate::common::jormungandr::logger::JormungandrLogger;
pub fn storage_loading_benchmark_from_log(
log: &JormungandrLogger,
name: &str,
timeout: Duration,
) -> SpeedBenchmarkFinish {
speed_benchmark_from_log(
log,
name.clone(),
timeout,
"storing blockchain",
"Loaded from storage",
)
}
pub fn speed_benchmark_from_log(
log: &JormungandrLogger,
name: &str,
timeout: Duration,
start_measurement: &str,
stop_measurement: &str,
) -> SpeedBenchmarkFinish {
let start_entry: Timestamp = log
.get_log_entries()
.find(|x| x.msg.contains(start_measurement))
.expect("cannot find start mesurement entry in log")
.into();
let stop_entry: Timestamp = log
.get_log_entries()
.find(|x| x.msg.contains(stop_measurement))
.expect("cannot find stop mesurement entry in log")
.into();
let definition = SpeedBenchmarkDef::new(name.to_string())
.target(timeout)
.clone();
let speed = Speed::new(&start_entry, &stop_entry);
SpeedBenchmarkFinish::new(definition, speed)
}
| 27.282609 | 90 | 0.658167 |
4a3086f53e7c07aadfb27212c1b424303fe8f228 | 2,529 | #![doc = "generated by AutoRust 0.1.0"]
#[cfg(feature = "package-2021-04-01-preview")]
mod package_2021_04_01_preview;
#[cfg(feature = "package-2021-04-01-preview")]
pub use package_2021_04_01_preview::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-03-15-preview")]
mod package_2020_03_15_preview;
use azure_core::setters;
#[cfg(feature = "package-2020-03-15-preview")]
pub use package_2020_03_15_preview::{models, operations, API_VERSION};
pub fn config(
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
token_credential: Box<dyn azure_core::TokenCredential>,
) -> OperationConfigBuilder {
OperationConfigBuilder {
api_version: None,
http_client,
base_path: None,
token_credential,
token_credential_resource: None,
}
}
pub struct OperationConfigBuilder {
api_version: Option<String>,
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: Option<String>,
token_credential: Box<dyn azure_core::TokenCredential>,
token_credential_resource: Option<String>,
}
impl OperationConfigBuilder {
setters! { api_version : String => Some (api_version) , base_path : String => Some (base_path) , token_credential_resource : String => Some (token_credential_resource) , }
pub fn build(self) -> OperationConfig {
OperationConfig {
api_version: self.api_version.unwrap_or(API_VERSION.to_owned()),
http_client: self.http_client,
base_path: self.base_path.unwrap_or("https://management.azure.com".to_owned()),
token_credential: Some(self.token_credential),
token_credential_resource: self.token_credential_resource.unwrap_or("https://management.azure.com/".to_owned()),
}
}
}
pub struct OperationConfig {
api_version: String,
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: String,
token_credential: Option<Box<dyn azure_core::TokenCredential>>,
token_credential_resource: String,
}
impl OperationConfig {
pub fn api_version(&self) -> &str {
self.api_version.as_str()
}
pub fn http_client(&self) -> &dyn azure_core::HttpClient {
self.http_client.as_ref()
}
pub fn base_path(&self) -> &str {
self.base_path.as_str()
}
pub fn token_credential(&self) -> Option<&dyn azure_core::TokenCredential> {
self.token_credential.as_deref()
}
pub fn token_credential_resource(&self) -> &str {
self.token_credential_resource.as_str()
}
}
| 38.318182 | 175 | 0.697904 |
9b63a8288191d6823a5d21c68714ec26c77abb30 | 9,075 | use crate::{
buffer::Buffer,
completion::Completer,
editor::{command::Command, event::Event},
history::{EntryCursor, History, Session},
os::{TerminalInput, TerminalOutput},
theme::Theme,
};
use riptide_runtime::{Fiber, Value};
use std::{
fmt::Write,
os::unix::io::AsRawFd,
};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use yansi::Paint;
pub mod command;
pub mod event;
pub mod prompt;
/// Controls the interactive command line editor.
pub struct Editor<I, O: AsRawFd, C> {
stdin: TerminalInput<I>,
stdout: TerminalOutput<O>,
history: History,
history_session: Session,
history_cursor: Option<EntryCursor>,
completer: C,
buffer: Buffer,
}
pub enum ReadLine {
Input(String),
Eof,
}
impl<I, O: AsRawFd, C> Editor<I, O, C> {
pub fn new(stdin: I, stdout: O, history: History, session: Session, completer: C) -> Self {
Self {
stdin: TerminalInput::new(stdin),
stdout: TerminalOutput::new(stdout).unwrap(),
history,
history_session: session,
history_cursor: None,
completer,
buffer: Buffer::new(),
}
}
// TODO: Determine how this is configured.
fn get_theme(&self) -> Theme {
Theme::default()
}
async fn get_prompt_str(&self, fiber: &mut Fiber) -> String {
match fiber.globals().get("riptide-prompt") {
// Static prompt.
Value::String(ref s) => return s.to_string(),
// Prompt is determined by a callback function.
value @ Value::Block(_) => match fiber.invoke(&value, &[]).await {
// Closure returned successfully.
Ok(Value::String(ref s)) => return s.to_string(),
// Closure succeeded, but returned an invalid data type.
Ok(value) => {
log::warn!("prompt function returned invalid data type: {}", value.type_name());
}
Err(e) => {
log::warn!("prompt function threw exception: {}", e);
}
},
Value::Nil => {
// Unspecified
}
value => {
// Invalid data type
log::warn!("prompt must be a closure or string, not '{}'", value.type_name());
}
}
let theme = self.get_theme();
let mut buf = String::new();
let cwd = fiber.current_dir().to_string();
write!(
&mut buf,
"{}{}",
Paint::blue(theme.prompt.as_ref().unwrap().item_format.as_ref().unwrap().replace("%s", &cwd)),
theme.prompt.as_ref().unwrap().item_separator.as_ref().unwrap(),
).unwrap();
write!(
&mut buf,
"{} ",
theme.prompt.as_ref().unwrap().format.as_ref().unwrap(),
).unwrap();
buf
}
}
impl<I: AsyncRead + Unpin, O: AsyncWrite + AsRawFd + Unpin, C: Completer> Editor<I, O, C> {
/// Show a command prompt to the user and await for the user to input a
/// command. The typed command is returned once submitted.
pub async fn read_line(&mut self, fiber: &mut Fiber) -> ReadLine {
let prompt = self.get_prompt_str(fiber).await;
self.stdout.write_all(prompt.as_bytes()).await.unwrap();
self.stdout.flush().await.unwrap();
let mut editor = scopeguard::guard(self, |editor| {
editor.stdout.set_raw_mode(false).unwrap();
});
// Enter raw mode.
editor.stdout.set_raw_mode(true).unwrap();
// Handle keyboard events.
while let Ok(event) = editor.stdin.next_event().await {
log::trace!("event: {:?}", event);
match event {
Event::Char('\n') => {
editor.stdout.write_all(b"\r\n").await.unwrap();
if !editor.buffer.text().is_empty() {
break;
}
}
Event::Left | Event::Ctrl('b') => {
editor.buffer.move_cursor_relative(-1);
}
Event::Right | Event::Ctrl('f') => {
if editor.buffer.cursor_is_at_end_of_line() {
// If the cursor is already at the end of the line, then
// fill in the current suggested command, if any.
// TODO: Only compute suggestion one time each event.
if let Some(suggestion) = editor.completer.complete_one(editor.buffer.text()) {
if let Some(suffix) = suggestion.strip_prefix(editor.buffer.text()) {
if !suffix.is_empty() {
editor.buffer.insert_str(suffix);
}
}
}
} else {
// Advance the cursor right as normal.
editor.buffer.move_cursor_relative(1);
}
}
Event::Up => {
let history = editor.history.clone();
match editor.history_cursor.get_or_insert_with(|| history.entries()).next() {
Some(entry) => {
// TODO: Save buffer for later if user wants to return to
// what they typed.
editor.buffer.clear();
editor.buffer.insert_str(entry.command());
}
None => {
// TODO
}
}
}
Event::Down => {
if let Some(mut cursor) = editor.history_cursor.take() {
editor.buffer.clear();
if let Some(entry) = cursor.prev() {
editor.buffer.insert_str(entry.command());
editor.history_cursor = Some(cursor);
}
}
// TODO: Restore original buffer
}
Event::Home | Event::Ctrl('a') => {
editor.buffer.move_to_start_of_line();
}
Event::End | Event::Ctrl('e') => {
editor.buffer.move_to_end_of_line();
}
Event::Char(c) => {
editor.buffer.insert_char(c);
}
Event::Backspace => {
editor.buffer.delete_before_cursor();
}
Event::Delete => {
editor.buffer.delete_after_cursor();
}
Event::Ctrl('c') => {
editor.buffer.clear();
}
Event::Ctrl('d') | Event::Eof => {
if editor.buffer.is_empty() {
return ReadLine::Eof;
}
}
_ => {}
}
editor.redraw(fiber).await;
}
editor.history_cursor = None;
// Record line to history.
editor.history_session.add(editor.buffer.text());
// Move the command line out of out buffer and return it.
ReadLine::Input(editor.buffer.take_text())
}
/// Redraw the buffer.
pub async fn redraw(&mut self, fiber: &mut Fiber) {
let prompt = self.get_prompt_str(fiber).await;
// Render the current buffer text.
self.stdout.write_all(b"\r").await.unwrap();
self.stdout
.command(Command::ClearAfterCursor)
.await
.unwrap();
self.stdout
.write_all(format!("{}{}", prompt, self.buffer.text()).as_bytes())
.await
.unwrap();
// Render the top completion suggestion.
if !self.buffer.is_empty() {
if let Some(suggestion) = self.completer.complete_one(self.buffer.text()) {
if let Some(suffix) = suggestion.strip_prefix(self.buffer.text()) {
if !suffix.is_empty() {
self.stdout
.write_all(format!("{}", Paint::new(suffix).dimmed()).as_bytes())
.await
.unwrap();
self.stdout
.command(Command::MoveCursorLeft(suffix.len()))
.await
.unwrap();
}
}
}
}
// Update the cursor position.
let diff = self.buffer.text().len() - self.buffer.cursor();
if diff > 0 {
self.stdout
.command(Command::MoveCursorLeft(diff))
.await
.unwrap();
}
// Flush all changes from the IO buffer.
self.stdout.flush().await.unwrap();
}
}
| 34.245283 | 106 | 0.466997 |
6abd4f9e073664a4386ca335aa82407fb63e3ee2 | 21,015 | use cfg_if::cfg_if;
use foreign_types::ForeignType;
use foreign_types::ForeignTypeRef;
#[cfg(any(ossl111, not(osslconf = "OPENSSL_NO_PSK")))]
use libc::c_char;
#[cfg(ossl111)]
use libc::size_t;
use libc::{c_int, c_uchar, c_uint, c_void};
#[cfg(any(ossl111, not(osslconf = "OPENSSL_NO_PSK")))]
use std::ffi::CStr;
use std::mem;
use std::ptr;
use std::slice;
#[cfg(ossl111)]
use std::str;
use std::sync::Arc;
use crate::dh::Dh;
#[cfg(all(ossl101, not(ossl110)))]
use crate::ec::EcKey;
use crate::error::ErrorStack;
use crate::pkey::Params;
#[cfg(any(ossl102, libressl261))]
use crate::ssl::AlpnError;
use crate::ssl::{
try_get_session_ctx_index, SniError, Ssl, SslAlert, SslContext, SslContextRef, SslRef,
SslSession, SslSessionRef,
};
#[cfg(ossl111)]
use crate::ssl::{ClientHelloResponse, ExtensionContext};
use crate::ssl::{SslAlertInformationCode, SslAlertInformationContext};
use crate::util::ForeignTypeRefExt;
#[cfg(ossl111)]
use crate::x509::X509Ref;
use crate::x509::{X509StoreContext, X509StoreContextRef};
pub extern "C" fn raw_verify<F>(preverify_ok: c_int, x509_ctx: *mut ffi::X509_STORE_CTX) -> c_int
where
F: Fn(bool, &mut X509StoreContextRef) -> bool + 'static + Sync + Send,
{
unsafe {
let ctx = X509StoreContextRef::from_ptr_mut(x509_ctx);
let ssl_idx = X509StoreContext::ssl_idx().expect("BUG: store context ssl index missing");
let verify_idx = SslContext::cached_ex_index::<F>();
// raw pointer shenanigans to break the borrow of ctx
// the callback can't mess with its own ex_data slot so this is safe
let verify = ctx
.ex_data(ssl_idx)
.expect("BUG: store context missing ssl")
.ssl_context()
.ex_data(verify_idx)
.expect("BUG: verify callback missing") as *const F;
(*verify)(preverify_ok != 0, ctx) as c_int
}
}
#[cfg(not(osslconf = "OPENSSL_NO_PSK"))]
pub extern "C" fn raw_client_psk<F>(
ssl: *mut ffi::SSL,
hint: *const c_char,
identity: *mut c_char,
max_identity_len: c_uint,
psk: *mut c_uchar,
max_psk_len: c_uint,
) -> c_uint
where
F: Fn(&mut SslRef, Option<&[u8]>, &mut [u8], &mut [u8]) -> Result<usize, ErrorStack>
+ 'static
+ Sync
+ Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback_idx = SslContext::cached_ex_index::<F>();
let callback = ssl
.ssl_context()
.ex_data(callback_idx)
.expect("BUG: psk callback missing") as *const F;
let hint = if !hint.is_null() {
Some(CStr::from_ptr(hint).to_bytes())
} else {
None
};
// Give the callback mutable slices into which it can write the identity and psk.
let identity_sl = slice::from_raw_parts_mut(identity as *mut u8, max_identity_len as usize);
let psk_sl = slice::from_raw_parts_mut(psk as *mut u8, max_psk_len as usize);
match (*callback)(ssl, hint, identity_sl, psk_sl) {
Ok(psk_len) => psk_len as u32,
Err(e) => {
e.put();
0
}
}
}
}
#[cfg(not(osslconf = "OPENSSL_NO_PSK"))]
pub extern "C" fn raw_server_psk<F>(
ssl: *mut ffi::SSL,
identity: *const c_char,
psk: *mut c_uchar,
max_psk_len: c_uint,
) -> c_uint
where
F: Fn(&mut SslRef, Option<&[u8]>, &mut [u8]) -> Result<usize, ErrorStack>
+ 'static
+ Sync
+ Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback_idx = SslContext::cached_ex_index::<F>();
let callback = ssl
.ssl_context()
.ex_data(callback_idx)
.expect("BUG: psk callback missing") as *const F;
let identity = if identity.is_null() {
None
} else {
Some(CStr::from_ptr(identity).to_bytes())
};
// Give the callback mutable slices into which it can write the psk.
let psk_sl = slice::from_raw_parts_mut(psk as *mut u8, max_psk_len as usize);
match (*callback)(ssl, identity, psk_sl) {
Ok(psk_len) => psk_len as u32,
Err(e) => {
e.put();
0
}
}
}
}
pub extern "C" fn ssl_raw_verify<F>(
preverify_ok: c_int,
x509_ctx: *mut ffi::X509_STORE_CTX,
) -> c_int
where
F: Fn(bool, &mut X509StoreContextRef) -> bool + 'static + Sync + Send,
{
unsafe {
let ctx = X509StoreContextRef::from_ptr_mut(x509_ctx);
let ssl_idx = X509StoreContext::ssl_idx().expect("BUG: store context ssl index missing");
let callback_idx = Ssl::cached_ex_index::<Arc<F>>();
let callback = ctx
.ex_data(ssl_idx)
.expect("BUG: store context missing ssl")
.ex_data(callback_idx)
.expect("BUG: ssl verify callback missing")
.clone();
callback(preverify_ok != 0, ctx) as c_int
}
}
pub extern "C" fn raw_sni<F>(ssl: *mut ffi::SSL, al: *mut c_int, arg: *mut c_void) -> c_int
where
F: Fn(&mut SslRef, &mut SslAlert) -> Result<(), SniError> + 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = arg as *const F;
let mut alert = SslAlert(*al);
let r = (*callback)(ssl, &mut alert);
*al = alert.0;
match r {
Ok(()) => ffi::SSL_TLSEXT_ERR_OK,
Err(e) => e.0,
}
}
}
#[cfg(any(ossl102, libressl261))]
pub extern "C" fn raw_alpn_select<F>(
ssl: *mut ffi::SSL,
out: *mut *const c_uchar,
outlen: *mut c_uchar,
inbuf: *const c_uchar,
inlen: c_uint,
_arg: *mut c_void,
) -> c_int
where
F: for<'a> Fn(&mut SslRef, &'a [u8]) -> Result<&'a [u8], AlpnError> + 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: alpn callback missing") as *const F;
let protos = slice::from_raw_parts(inbuf as *const u8, inlen as usize);
match (*callback)(ssl, protos) {
Ok(proto) => {
*out = proto.as_ptr() as *const c_uchar;
*outlen = proto.len() as c_uchar;
ffi::SSL_TLSEXT_ERR_OK
}
Err(e) => e.0,
}
}
}
pub unsafe extern "C" fn raw_info<F>(ssl: *const ffi::SSL, type_: c_int, val: c_int)
where
F: Fn(&SslRef, SslAlertInformationContext, SslAlertInformationCode) + 'static + Sync + Send,
{
let ssl = SslRef::from_const_ptr(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: ssl context info callback missing") as *const F;
(*callback)(
ssl,
SslAlertInformationContext { bits: type_ },
SslAlertInformationCode(val),
)
}
pub unsafe extern "C" fn raw_info_ssl<F>(ssl: *const ffi::SSL, type_: c_int, val: c_int)
where
F: Fn(&SslRef, SslAlertInformationContext, SslAlertInformationCode) + 'static + Sync + Send,
{
let ssl = SslRef::from_const_ptr(ssl);
let callback = ssl
.ex_data(Ssl::cached_ex_index::<Arc<F>>())
.expect("BUG: ssl info callback missing")
.clone();
(*callback)(
ssl,
SslAlertInformationContext { bits: type_ },
SslAlertInformationCode(val),
)
}
pub unsafe extern "C" fn raw_tmp_dh<F>(
ssl: *mut ffi::SSL,
is_export: c_int,
keylength: c_int,
) -> *mut ffi::DH
where
F: Fn(&mut SslRef, bool, u32) -> Result<Dh<Params>, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: tmp dh callback missing") as *const F;
match (*callback)(ssl, is_export != 0, keylength as u32) {
Ok(dh) => {
let ptr = dh.as_ptr();
mem::forget(dh);
ptr
}
Err(e) => {
e.put();
ptr::null_mut()
}
}
}
#[cfg(all(ossl101, not(ossl110)))]
pub unsafe extern "C" fn raw_tmp_ecdh<F>(
ssl: *mut ffi::SSL,
is_export: c_int,
keylength: c_int,
) -> *mut ffi::EC_KEY
where
F: Fn(&mut SslRef, bool, u32) -> Result<EcKey<Params>, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: tmp ecdh callback missing") as *const F;
match (*callback)(ssl, is_export != 0, keylength as u32) {
Ok(ec_key) => {
let ptr = ec_key.as_ptr();
mem::forget(ec_key);
ptr
}
Err(e) => {
e.put();
ptr::null_mut()
}
}
}
pub unsafe extern "C" fn raw_tmp_dh_ssl<F>(
ssl: *mut ffi::SSL,
is_export: c_int,
keylength: c_int,
) -> *mut ffi::DH
where
F: Fn(&mut SslRef, bool, u32) -> Result<Dh<Params>, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ex_data(Ssl::cached_ex_index::<Arc<F>>())
.expect("BUG: ssl tmp dh callback missing")
.clone();
match callback(ssl, is_export != 0, keylength as u32) {
Ok(dh) => {
let ptr = dh.as_ptr();
mem::forget(dh);
ptr
}
Err(e) => {
e.put();
ptr::null_mut()
}
}
}
#[cfg(all(ossl101, not(ossl110)))]
pub unsafe extern "C" fn raw_tmp_ecdh_ssl<F>(
ssl: *mut ffi::SSL,
is_export: c_int,
keylength: c_int,
) -> *mut ffi::EC_KEY
where
F: Fn(&mut SslRef, bool, u32) -> Result<EcKey<Params>, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ex_data(Ssl::cached_ex_index::<Arc<F>>())
.expect("BUG: ssl tmp ecdh callback missing")
.clone();
match callback(ssl, is_export != 0, keylength as u32) {
Ok(ec_key) => {
let ptr = ec_key.as_ptr();
mem::forget(ec_key);
ptr
}
Err(e) => {
e.put();
ptr::null_mut()
}
}
}
pub unsafe extern "C" fn raw_tlsext_status<F>(ssl: *mut ffi::SSL, _: *mut c_void) -> c_int
where
F: Fn(&mut SslRef) -> Result<bool, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: ocsp callback missing") as *const F;
let ret = (*callback)(ssl);
if ssl.is_server() {
match ret {
Ok(true) => ffi::SSL_TLSEXT_ERR_OK,
Ok(false) => ffi::SSL_TLSEXT_ERR_NOACK,
Err(e) => {
e.put();
ffi::SSL_TLSEXT_ERR_ALERT_FATAL
}
}
} else {
match ret {
Ok(true) => 1,
Ok(false) => 0,
Err(e) => {
e.put();
-1
}
}
}
}
pub unsafe extern "C" fn raw_new_session<F>(
ssl: *mut ffi::SSL,
session: *mut ffi::SSL_SESSION,
) -> c_int
where
F: Fn(&mut SslRef, SslSession) + 'static + Sync + Send,
{
let session_ctx_index =
try_get_session_ctx_index().expect("BUG: session context index initialization failed");
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ex_data(*session_ctx_index)
.expect("BUG: session context missing")
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: new session callback missing") as *const F;
let session = SslSession::from_ptr(session);
(*callback)(ssl, session);
// the return code doesn't indicate error vs success, but whether or not we consumed the session
1
}
pub unsafe extern "C" fn raw_remove_session<F>(
ctx: *mut ffi::SSL_CTX,
session: *mut ffi::SSL_SESSION,
) where
F: Fn(&SslContextRef, &SslSessionRef) + 'static + Sync + Send,
{
let ctx = SslContextRef::from_ptr(ctx);
let callback = ctx
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: remove session callback missing");
let session = SslSessionRef::from_ptr(session);
callback(ctx, session)
}
cfg_if! {
if #[cfg(any(ossl110, libressl280))] {
type DataPtr = *const c_uchar;
} else {
type DataPtr = *mut c_uchar;
}
}
pub unsafe extern "C" fn raw_get_session<F>(
ssl: *mut ffi::SSL,
data: DataPtr,
len: c_int,
copy: *mut c_int,
) -> *mut ffi::SSL_SESSION
where
F: Fn(&mut SslRef, &[u8]) -> Option<SslSession> + 'static + Sync + Send,
{
let session_ctx_index =
try_get_session_ctx_index().expect("BUG: session context index initialization failed");
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ex_data(*session_ctx_index)
.expect("BUG: session context missing")
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: get session callback missing") as *const F;
let data = slice::from_raw_parts(data as *const u8, len as usize);
match (*callback)(ssl, data) {
Some(session) => {
let p = session.as_ptr();
mem::forget(session);
*copy = 0;
p
}
None => ptr::null_mut(),
}
}
#[cfg(ossl111)]
pub unsafe extern "C" fn raw_keylog<F>(ssl: *const ffi::SSL, line: *const c_char)
where
F: Fn(&SslRef, &str) + 'static + Sync + Send,
{
let ssl = SslRef::from_const_ptr(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: get session callback missing");
let line = CStr::from_ptr(line).to_bytes();
let line = str::from_utf8_unchecked(line);
callback(ssl, line);
}
#[cfg(ossl111)]
pub unsafe extern "C" fn raw_stateless_cookie_generate<F>(
ssl: *mut ffi::SSL,
cookie: *mut c_uchar,
cookie_len: *mut size_t,
) -> c_int
where
F: Fn(&mut SslRef, &mut [u8]) -> Result<usize, ErrorStack> + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: stateless cookie generate callback missing") as *const F;
let slice = slice::from_raw_parts_mut(cookie as *mut u8, ffi::SSL_COOKIE_LENGTH as usize);
match (*callback)(ssl, slice) {
Ok(len) => {
*cookie_len = len as size_t;
1
}
Err(e) => {
e.put();
0
}
}
}
#[cfg(ossl111)]
pub unsafe extern "C" fn raw_stateless_cookie_verify<F>(
ssl: *mut ffi::SSL,
cookie: *const c_uchar,
cookie_len: size_t,
) -> c_int
where
F: Fn(&mut SslRef, &[u8]) -> bool + 'static + Sync + Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: stateless cookie verify callback missing") as *const F;
let slice = slice::from_raw_parts(cookie as *const c_uchar as *const u8, cookie_len as usize);
(*callback)(ssl, slice) as c_int
}
pub extern "C" fn raw_cookie_generate<F>(
ssl: *mut ffi::SSL,
cookie: *mut c_uchar,
cookie_len: *mut c_uint,
) -> c_int
where
F: Fn(&mut SslRef, &mut [u8]) -> Result<usize, ErrorStack> + 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: cookie generate callback missing") as *const F;
// We subtract 1 from DTLS1_COOKIE_LENGTH as the ostensible value, 256, is erroneous but retained for
// compatibility. See comments in dtls1.h.
let slice =
slice::from_raw_parts_mut(cookie as *mut u8, ffi::DTLS1_COOKIE_LENGTH as usize - 1);
match (*callback)(ssl, slice) {
Ok(len) => {
*cookie_len = len as c_uint;
1
}
Err(e) => {
e.put();
0
}
}
}
}
cfg_if! {
if #[cfg(any(ossl110, libressl280))] {
type CookiePtr = *const c_uchar;
} else {
type CookiePtr = *mut c_uchar;
}
}
pub extern "C" fn raw_cookie_verify<F>(
ssl: *mut ffi::SSL,
cookie: CookiePtr,
cookie_len: c_uint,
) -> c_int
where
F: Fn(&mut SslRef, &[u8]) -> bool + 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: cookie verify callback missing") as *const F;
let slice =
slice::from_raw_parts(cookie as *const c_uchar as *const u8, cookie_len as usize);
(*callback)(ssl, slice) as c_int
}
}
#[cfg(ossl111)]
pub struct CustomExtAddState<T>(Option<T>);
#[cfg(ossl111)]
pub extern "C" fn raw_custom_ext_add<F, T>(
ssl: *mut ffi::SSL,
_: c_uint,
context: c_uint,
out: *mut *const c_uchar,
outlen: *mut size_t,
x: *mut ffi::X509,
chainidx: size_t,
al: *mut c_int,
_: *mut c_void,
) -> c_int
where
F: Fn(&mut SslRef, ExtensionContext, Option<(usize, &X509Ref)>) -> Result<Option<T>, SslAlert>
+ 'static
+ Sync
+ Send,
T: AsRef<[u8]> + 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: custom ext add callback missing") as *const F;
let ectx = ExtensionContext::from_bits_truncate(context);
let cert = if ectx.contains(ExtensionContext::TLS1_3_CERTIFICATE) {
Some((chainidx, X509Ref::from_ptr(x)))
} else {
None
};
match (*callback)(ssl, ectx, cert) {
Ok(None) => 0,
Ok(Some(buf)) => {
*outlen = buf.as_ref().len();
*out = buf.as_ref().as_ptr();
let idx = Ssl::cached_ex_index::<CustomExtAddState<T>>();
let mut buf = Some(buf);
let new = match ssl.ex_data_mut(idx) {
Some(state) => {
state.0 = buf.take();
false
}
None => true,
};
if new {
ssl.set_ex_data(idx, CustomExtAddState(buf));
}
1
}
Err(alert) => {
*al = alert.0;
-1
}
}
}
}
#[cfg(ossl111)]
pub extern "C" fn raw_custom_ext_free<T>(
ssl: *mut ffi::SSL,
_: c_uint,
_: c_uint,
_: *mut *const c_uchar,
_: *mut c_void,
) where
T: 'static + Sync + Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let idx = Ssl::cached_ex_index::<CustomExtAddState<T>>();
if let Some(state) = ssl.ex_data_mut(idx) {
state.0 = None;
}
}
}
#[cfg(ossl111)]
pub extern "C" fn raw_custom_ext_parse<F>(
ssl: *mut ffi::SSL,
_: c_uint,
context: c_uint,
input: *const c_uchar,
inlen: size_t,
x: *mut ffi::X509,
chainidx: size_t,
al: *mut c_int,
_: *mut c_void,
) -> c_int
where
F: Fn(&mut SslRef, ExtensionContext, &[u8], Option<(usize, &X509Ref)>) -> Result<(), SslAlert>
+ 'static
+ Sync
+ Send,
{
unsafe {
let ssl = SslRef::from_ptr_mut(ssl);
let callback = ssl
.ssl_context()
.ex_data(SslContext::cached_ex_index::<F>())
.expect("BUG: custom ext parse callback missing") as *const F;
let ectx = ExtensionContext::from_bits_truncate(context);
let slice = slice::from_raw_parts(input as *const u8, inlen as usize);
let cert = if ectx.contains(ExtensionContext::TLS1_3_CERTIFICATE) {
Some((chainidx, X509Ref::from_ptr(x)))
} else {
None
};
match (*callback)(ssl, ectx, slice, cert) {
Ok(()) => 1,
Err(alert) => {
*al = alert.0;
0
}
}
}
}
#[cfg(ossl111)]
pub unsafe extern "C" fn raw_client_hello<F>(
ssl: *mut ffi::SSL,
al: *mut c_int,
arg: *mut c_void,
) -> c_int
where
F: Fn(&mut SslRef, &mut SslAlert) -> Result<ClientHelloResponse, ErrorStack>
+ 'static
+ Sync
+ Send,
{
let ssl = SslRef::from_ptr_mut(ssl);
let callback = arg as *const F;
let mut alert = SslAlert(*al);
let r = (*callback)(ssl, &mut alert);
*al = alert.0;
match r {
Ok(c) => c.0,
Err(e) => {
e.put();
ffi::SSL_CLIENT_HELLO_ERROR
}
}
}
| 28.787671 | 109 | 0.5596 |
b95ce290548c95b6055ec2a1fb255a343a3bfc2a | 20,079 | // Copyright 2020. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
output_manager_service::TxId,
transaction_service::{
error::{TransactionServiceError, TransactionServiceProtocolError},
handle::TransactionEvent,
service::TransactionServiceResources,
storage::{
database::TransactionBackend,
models::{CompletedTransaction, InboundTransaction, TransactionDirection, TransactionStatus},
},
tasks::send_transaction_reply::send_transaction_reply,
},
};
use chrono::Utc;
use futures::{
channel::{mpsc, oneshot},
future::FutureExt,
StreamExt,
};
use log::*;
use rand::rngs::OsRng;
use std::sync::Arc;
use tari_comms::types::CommsPublicKey;
use tari_core::transactions::{
transaction::{OutputFeatures, Transaction},
transaction_protocol::{recipient::RecipientState, sender::TransactionSenderMessage},
types::PrivateKey,
ReceiverTransactionProtocol,
};
use tari_crypto::keys::SecretKey;
use tokio::time::delay_for;
const LOG_TARGET: &str = "wallet::transaction_service::protocols::receive_protocol";
const LOG_TARGET_STRESS: &str = "stress_test::receive_protocol";
#[derive(Debug, PartialEq)]
pub enum TransactionReceiveProtocolStage {
Initial,
WaitForFinalize,
}
pub struct TransactionReceiveProtocol<TBackend>
where TBackend: TransactionBackend + 'static
{
id: u64,
source_pubkey: CommsPublicKey,
sender_message: TransactionSenderMessage,
stage: TransactionReceiveProtocolStage,
resources: TransactionServiceResources<TBackend>,
transaction_finalize_receiver: Option<mpsc::Receiver<(CommsPublicKey, TxId, Transaction)>>,
cancellation_receiver: Option<oneshot::Receiver<()>>,
}
impl<TBackend> TransactionReceiveProtocol<TBackend>
where TBackend: TransactionBackend + 'static
{
pub fn new(
id: u64,
source_pubkey: CommsPublicKey,
sender_message: TransactionSenderMessage,
stage: TransactionReceiveProtocolStage,
resources: TransactionServiceResources<TBackend>,
transaction_finalize_receiver: mpsc::Receiver<(CommsPublicKey, TxId, Transaction)>,
cancellation_receiver: oneshot::Receiver<()>,
) -> Self
{
Self {
id,
source_pubkey,
sender_message,
stage,
resources,
transaction_finalize_receiver: Some(transaction_finalize_receiver),
cancellation_receiver: Some(cancellation_receiver),
}
}
pub async fn execute(mut self) -> Result<u64, TransactionServiceProtocolError> {
info!(
target: LOG_TARGET,
"Starting Transaction Receive protocol for TxId: {} at Stage {:?}", self.id, self.stage
);
match self.stage {
TransactionReceiveProtocolStage::Initial => {
self.accept_transaction().await?;
self.wait_for_finalization().await?;
},
TransactionReceiveProtocolStage::WaitForFinalize => {
self.wait_for_finalization().await?;
},
}
Ok(self.id)
}
async fn accept_transaction(&mut self) -> Result<(), TransactionServiceProtocolError> {
// Currently we will only reply to a Single sender transaction protocol
if let TransactionSenderMessage::Single(data) = self.sender_message.clone() {
// Check this is not a repeat message i.e. tx_id doesn't already exist in our pending or completed
// transactions
if self
.resources
.db
.transaction_exists(data.tx_id)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?
{
trace!(
target: LOG_TARGET,
"Received Transaction (TxId: {}) already present in database.",
data.tx_id,
);
return Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::RepeatedMessageError,
));
}
let amount = data.amount;
let spending_key = self
.resources
.output_manager_service
.get_recipient_spending_key(data.tx_id, data.amount)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
let nonce = PrivateKey::random(&mut OsRng);
let rtp = ReceiverTransactionProtocol::new(
self.sender_message.clone(),
nonce,
spending_key,
OutputFeatures::default(),
&self.resources.factories,
);
let inbound_transaction = InboundTransaction::new(
data.tx_id,
self.source_pubkey.clone(),
amount,
rtp,
TransactionStatus::Pending,
data.message.clone(),
Utc::now().naive_utc(),
);
let send_result = send_transaction_reply(
inbound_transaction.clone(),
self.resources.outbound_message_service.clone(),
self.resources.config.direct_send_timeout,
)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, e))?;
self.resources
.db
.add_pending_inbound_transaction(inbound_transaction.tx_id, inbound_transaction)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
self.resources
.db
.increment_send_count(self.id)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
if !send_result {
error!(
target: LOG_TARGET,
"Transaction with TX_ID = {} received from {}. Reply could not be sent!",
data.tx_id,
self.source_pubkey,
);
} else {
info!(
target: LOG_TARGET,
"Transaction with TX_ID = {} received from {}. Reply Sent", data.tx_id, self.source_pubkey,
);
}
trace!(
target: LOG_TARGET,
"Transaction (TX_ID: {}) - Amount: {} - Message: {}",
data.tx_id,
amount,
data.message,
);
let _ = self
.resources
.event_publisher
.send(Arc::new(TransactionEvent::ReceivedTransaction(data.tx_id)))
.map_err(|e| {
trace!(target: LOG_TARGET, "Error sending event due to no subscribers: {:?}", e);
e
});
Ok(())
} else {
Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::InvalidStateError,
))
}
}
async fn wait_for_finalization(&mut self) -> Result<(), TransactionServiceProtocolError> {
let mut receiver = self
.transaction_finalize_receiver
.take()
.ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?;
let mut cancellation_receiver = self
.cancellation_receiver
.take()
.ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?
.fuse();
let inbound_tx = match self.resources.db.get_pending_inbound_transaction(self.id).await {
Ok(tx) => tx,
Err(_e) => {
debug!(
target: LOG_TARGET,
"TxId for received Finalized Transaction does not exist in Pending Inbound Transactions, could be \
a repeat Store and Forward message"
);
return Ok(());
},
};
// Determine the time remaining before this transaction times out
let elapsed_time = Utc::now()
.naive_utc()
.signed_duration_since(inbound_tx.timestamp)
.to_std()
.map_err(|_| {
TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::ConversionError("duration::OutOfRangeError".to_string()),
)
})?;
let timeout_duration = match self
.resources
.config
.pending_transaction_cancellation_timeout
.checked_sub(elapsed_time)
{
None => {
// This will cancel the transaction and exit this protocol
return self.timeout_transaction().await;
},
Some(t) => t,
};
let mut timeout_delay = delay_for(timeout_duration).fuse();
// check to see if a resend is due
let resend = match inbound_tx.last_send_timestamp {
None => true,
Some(timestamp) => {
let elapsed_time = Utc::now()
.naive_utc()
.signed_duration_since(timestamp)
.to_std()
.map_err(|_| {
TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::ConversionError("duration::OutOfRangeError".to_string()),
)
})?;
elapsed_time > self.resources.config.transaction_resend_period
},
};
if resend {
if let Err(e) = send_transaction_reply(
inbound_tx.clone(),
self.resources.outbound_message_service.clone(),
self.resources.config.direct_send_timeout,
)
.await
{
warn!(
target: LOG_TARGET,
"Error resending Transaction Reply (TxId: {}): {:?}", self.id, e
);
}
self.resources
.db
.increment_send_count(self.id)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
}
let mut shutdown = self.resources.shutdown_signal.clone();
#[allow(unused_assignments)]
let mut incoming_finalized_transaction = None;
loop {
loop {
let mut resend_timeout = delay_for(self.resources.config.transaction_resend_period).fuse();
futures::select! {
(spk, tx_id, tx) = receiver.select_next_some() => {
incoming_finalized_transaction = Some(tx);
if inbound_tx.source_public_key != spk {
warn!(
target: LOG_TARGET,
"Finalized Transaction did not come from the expected Public Key"
);
} else if tx_id != inbound_tx.tx_id || tx_id != self.id {
debug!(target: LOG_TARGET, "Finalized Transaction does not have the correct TxId");
} else {
break;
}
},
result = cancellation_receiver => {
if result.is_ok() {
info!(target: LOG_TARGET, "Cancelling Transaction Receive Protocol for TxId: {}", self.id);
return Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::TransactionCancelled,
));
}
},
() = resend_timeout => {
match send_transaction_reply(
inbound_tx.clone(),
self.resources.outbound_message_service.clone(),
self.resources.config.direct_send_timeout,
)
.await {
Ok(_) => self.resources
.db
.increment_send_count(self.id)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?,
Err(e) => warn!(
target: LOG_TARGET,
"Error resending Transaction Reply (TxId: {}): {:?}", self.id, e
),
}
},
() = timeout_delay => {
return self.timeout_transaction().await;
}
_ = shutdown => {
info!(target: LOG_TARGET, "Transaction Receive Protocol (id: {}) shutting down because it received the shutdown signal", self.id);
return Err(TransactionServiceProtocolError::new(self.id, TransactionServiceError::Shutdown))
}
}
}
let finalized_transaction: Transaction = incoming_finalized_transaction.ok_or_else(|| {
TransactionServiceProtocolError::new(self.id, TransactionServiceError::TransactionCancelled)
})?;
info!(
target: LOG_TARGET,
"Finalized Transaction with TX_ID = {} received from {}",
self.id,
self.source_pubkey.clone()
);
debug!(
target: LOG_TARGET_STRESS,
"Finalized Transaction with TX_ID = {} received from {}",
self.id,
self.source_pubkey.clone()
);
let rtp_output = match inbound_tx.receiver_protocol.state.clone() {
RecipientState::Finalized(s) => s.output,
RecipientState::Failed(_) => {
warn!(
target: LOG_TARGET,
"Finalized Transaction TxId: {} is not in the correct state to be completed", self.id
);
return Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::InvalidStateError,
));
},
};
let finalized_outputs = finalized_transaction.body.outputs();
if finalized_outputs.iter().find(|o| o == &&rtp_output).is_none() {
warn!(
target: LOG_TARGET,
"Finalized Transaction does not contain the Receiver's output"
);
continue;
}
let completed_transaction = CompletedTransaction::new(
self.id,
self.source_pubkey.clone(),
self.resources.node_identity.public_key().clone(),
inbound_tx.amount,
finalized_transaction.body.get_total_fee(),
finalized_transaction.clone(),
TransactionStatus::Completed,
inbound_tx.message.clone(),
inbound_tx.timestamp,
TransactionDirection::Inbound,
None,
);
self.resources
.db
.complete_inbound_transaction(self.id, completed_transaction.clone())
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
info!(
target: LOG_TARGET,
"Inbound Transaction with TX_ID = {} from {} moved to Completed Transactions",
self.id,
self.source_pubkey.clone()
);
let _ = self
.resources
.event_publisher
.send(Arc::new(TransactionEvent::ReceivedFinalizedTransaction(self.id)))
.map_err(|e| {
trace!(target: LOG_TARGET, "Error sending event, no subscribers: {:?}", e);
e
});
break;
}
Ok(())
}
async fn timeout_transaction(&mut self) -> Result<(), TransactionServiceProtocolError> {
info!(
target: LOG_TARGET,
"Cancelling Transaction Receive Protocol (TxId: {}) due to timeout after no counterparty response", self.id
);
self.resources
.db
.cancel_pending_transaction(self.id)
.await
.map_err(|e| {
warn!(
target: LOG_TARGET,
"Pending Transaction does not exist and could not be cancelled: {:?}", e
);
TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e))
})?;
self.resources
.output_manager_service
.cancel_transaction(self.id)
.await
.map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
let _ = self
.resources
.event_publisher
.send(Arc::new(TransactionEvent::TransactionCancelled(self.id)))
.map_err(|e| {
trace!(
target: LOG_TARGET,
"Error sending event because there are no subscribers: {:?}",
e
);
TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::BroadcastSendError(format!("{:?}", e)),
)
});
info!(
target: LOG_TARGET,
"Pending Transaction (TxId: {}) timed out after no response from counterparty", self.id
);
Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::Timeout,
))
}
}
| 39.60355 | 154 | 0.53663 |
5ddca4763260d51798018de4ee223feed846a73f | 764 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C lto -C panic=abort -O
// no-prefer-dynamic
fn main() {
foo();
}
#[no_mangle]
#[inline(never)]
fn foo() {
let _a = Box::new(3);
bar();
// CHECK-LABEL: define void @foo
// CHECK: call void @bar
}
#[inline(never)]
#[no_mangle]
fn bar() {
println!("hello!");
}
| 23.875 | 68 | 0.675393 |
648232ea60854bb6bb0ed7657cef928a81b04547 | 385 | #![feature(std_misc)]
#![feature(test)]
extern crate test;
use std::time::duration::Duration;
use std::old_io::timer;
use test::Bencher;
pub fn sleep_ns (ns: i64) {
let interval = Duration::nanoseconds(ns);
timer::sleep(interval);
}
#[test]
fn it_works() {
sleep_ns(1);
}
#[bench]
fn bench_foo (b: &mut test::Bencher) {
b.iter(|| {
sleep_ns(1);
});
}
| 14.259259 | 45 | 0.607792 |
8fb53a54ddbfb97fb77ba1ac8b107c3e70ebbfa0 | 6,623 | use crate::dynamics::{RawIslandManager, RawRigidBodySet};
use crate::geometry::{
RawColliderSet, RawPointColliderProjection, RawRayColliderIntersection, RawRayColliderToi,
RawShape, RawShapeColliderTOI,
};
use crate::math::{RawRotation, RawVector};
use rapier::geometry::{ColliderHandle, Ray};
use rapier::math::Isometry;
use rapier::pipeline::QueryPipeline;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
pub struct RawQueryPipeline(pub(crate) QueryPipeline);
#[wasm_bindgen]
impl RawQueryPipeline {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
RawQueryPipeline(QueryPipeline::new())
}
pub fn update(
&mut self,
islands: &RawIslandManager,
bodies: &RawRigidBodySet,
colliders: &RawColliderSet,
) {
self.0.update(&islands.0, &bodies.0, &colliders.0);
}
pub fn castRay(
&self,
colliders: &RawColliderSet,
rayOrig: &RawVector,
rayDir: &RawVector,
maxToi: f32,
solid: bool,
groups: u32,
) -> Option<RawRayColliderToi> {
let ray = Ray::new(rayOrig.0.into(), rayDir.0);
let (handle, toi) = self.0.cast_ray(
&colliders.0,
&ray,
maxToi,
solid,
crate::geometry::unpack_interaction_groups(groups),
None,
)?;
Some(RawRayColliderToi { handle, toi })
}
pub fn castRayAndGetNormal(
&self,
colliders: &RawColliderSet,
rayOrig: &RawVector,
rayDir: &RawVector,
maxToi: f32,
solid: bool,
groups: u32,
) -> Option<RawRayColliderIntersection> {
let ray = Ray::new(rayOrig.0.into(), rayDir.0);
let (handle, inter) = self.0.cast_ray_and_get_normal(
&colliders.0,
&ray,
maxToi,
solid,
crate::geometry::unpack_interaction_groups(groups),
None,
)?;
Some(RawRayColliderIntersection { handle, inter })
}
// The callback is of type (RawRayColliderIntersection) => bool
pub fn intersectionsWithRay(
&self,
colliders: &RawColliderSet,
rayOrig: &RawVector,
rayDir: &RawVector,
maxToi: f32,
solid: bool,
groups: u32,
callback: &js_sys::Function,
) {
let ray = Ray::new(rayOrig.0.into(), rayDir.0);
let this = JsValue::null();
let rcallback = |handle, inter| {
let result = RawRayColliderIntersection { handle, inter };
match callback.call1(&this, &JsValue::from(result)) {
Err(_) => true,
Ok(val) => val.as_bool().unwrap_or(true),
}
};
self.0.intersections_with_ray(
&colliders.0,
&ray,
maxToi,
solid,
crate::geometry::unpack_interaction_groups(groups),
None,
rcallback,
)
}
pub fn intersectionWithShape(
&self,
colliders: &RawColliderSet,
shapePos: &RawVector,
shapeRot: &RawRotation,
shape: &RawShape,
groups: u32,
) -> Option<u32> {
let pos = Isometry::from_parts(shapePos.0.into(), shapeRot.0);
self.0
.intersection_with_shape(
&colliders.0,
&pos,
&*shape.0,
crate::geometry::unpack_interaction_groups(groups),
None,
)
.map(|h| h.into_raw_parts().0)
}
pub fn projectPoint(
&self,
colliders: &RawColliderSet,
point: &RawVector,
solid: bool,
groups: u32,
) -> Option<RawPointColliderProjection> {
self.0
.project_point(
&colliders.0,
&point.0.into(),
solid,
crate::geometry::unpack_interaction_groups(groups),
None,
)
.map(|(handle, proj)| RawPointColliderProjection { handle, proj })
}
// The callback is of type (u32) => bool
pub fn intersectionsWithPoint(
&self,
colliders: &RawColliderSet,
point: &RawVector,
groups: u32,
callback: &js_sys::Function,
) {
let this = JsValue::null();
let rcallback = |handle: ColliderHandle| match callback
.call1(&this, &JsValue::from(handle.into_raw_parts().0 as u32))
{
Err(_) => true,
Ok(val) => val.as_bool().unwrap_or(true),
};
self.0.intersections_with_point(
&colliders.0,
&point.0.into(),
crate::geometry::unpack_interaction_groups(groups),
None,
rcallback,
)
}
// /// Projects a point on the scene and get
// pub fn projectPointAndGetFeature(
// &self,
// colliders: &ColliderSet,
// point: &Point<Real>,
// groups: InteractionGroups,
// ) -> Option<(ColliderHandle, PointProjection, FeatureId)> {
// }
pub fn castShape(
&self,
colliders: &RawColliderSet,
shapePos: &RawVector,
shapeRot: &RawRotation,
shapeVel: &RawVector,
shape: &RawShape,
maxToi: f32,
groups: u32,
) -> Option<RawShapeColliderTOI> {
let pos = Isometry::from_parts(shapePos.0.into(), shapeRot.0);
self.0
.cast_shape(
&colliders.0,
&pos,
&shapeVel.0,
&*shape.0,
maxToi,
crate::geometry::unpack_interaction_groups(groups),
None,
)
.map(|(handle, toi)| RawShapeColliderTOI { handle, toi })
}
// The callback has type (u32) => boolean
pub fn intersectionsWithShape(
&self,
colliders: &RawColliderSet,
shapePos: &RawVector,
shapeRot: &RawRotation,
shape: &RawShape,
groups: u32,
callback: &js_sys::Function,
) {
let this = JsValue::null();
let rcallback = |handle: ColliderHandle| match callback
.call1(&this, &JsValue::from(handle.into_raw_parts().0 as u32))
{
Err(_) => true,
Ok(val) => val.as_bool().unwrap_or(true),
};
let pos = Isometry::from_parts(shapePos.0.into(), shapeRot.0);
self.0.intersections_with_shape(
&colliders.0,
&pos,
&*shape.0,
crate::geometry::unpack_interaction_groups(groups),
None,
rcallback,
)
}
}
| 28.795652 | 94 | 0.534199 |
eb6cd12454a82045412e3c4ff5829277feb88f1f | 23,609 | use std::ops::Deref;
use std::ops::DerefMut;
use std::hash::Hash;
use std::cmp::Eq;
use std::collections::HashMap;
use std::borrow::Borrow;
use once_cell::sync::Lazy;
use tracing::trace;
use super::EpochCounter;
use super::Epoch;
use super::EpochDeltaChanges;
use super::EpochChanges;
pub trait DualDiff {
/// check if another is different from myself
fn diff(&self, new_value: &Self) -> ChangeFlag;
}
pub static FULL_FILTER: Lazy<ChangeFlag> = Lazy::new(ChangeFlag::all);
pub static SPEC_FILTER: Lazy<ChangeFlag> = Lazy::new(|| ChangeFlag {
spec: true,
status: false,
meta: false,
});
pub static STATUS_FILTER: Lazy<ChangeFlag> = Lazy::new(|| ChangeFlag {
spec: false,
status: true,
meta: false,
});
pub static META_FILTER: Lazy<ChangeFlag> = Lazy::new(|| ChangeFlag {
spec: false,
status: false,
meta: true,
});
/// Filter for metadata change
#[derive(Debug)]
pub struct ChangeFlag {
pub spec: bool,
pub status: bool,
pub meta: bool,
}
impl ChangeFlag {
pub fn all() -> Self {
Self {
spec: true,
status: true,
meta: true,
}
}
/// create no changes
#[inline]
pub fn no_change() -> Self {
Self {
spec: false,
status: false,
meta: false,
}
}
#[inline]
pub fn has_full_change(&self) -> bool {
self.spec && self.status && self.meta
}
/// check if there were any changes
#[inline]
pub fn has_no_changes(&self) -> bool {
!self.spec && !self.status && !self.meta
}
}
/// Keep track of internal changes to object
/// Track 3 different changes (spec,status,meta)
#[derive(Debug, Default, Clone)]
pub struct DualEpochCounter<T> {
spec_epoch: Epoch,
status_epoch: Epoch,
meta_epoch: Epoch,
inner: T,
}
impl<T> DualEpochCounter<T> {
pub fn new(inner: T) -> Self {
Self {
spec_epoch: 0,
status_epoch: 0,
meta_epoch: 0,
inner,
}
}
/// set epoch
fn set_epoch(&mut self, epoch: Epoch) {
self.spec_epoch = epoch;
self.status_epoch = epoch;
self.meta_epoch = epoch;
}
// copy epoch values from old value
fn copy_epoch(&mut self, old: &Self) {
self.spec_epoch = old.spec_epoch;
self.status_epoch = old.status_epoch;
self.meta_epoch = old.meta_epoch;
}
#[inline]
pub fn spec_epoch(&self) -> Epoch {
self.spec_epoch
}
fn set_spec_epoch(&mut self, epoch: Epoch) {
self.spec_epoch = epoch;
}
#[inline]
pub fn status_epoch(&self) -> Epoch {
self.status_epoch
}
fn set_status_epoch(&mut self, epoch: Epoch) {
self.status_epoch = epoch;
}
#[inline]
pub fn meta_epoch(&self) -> Epoch {
self.meta_epoch
}
fn set_meta_epoch(&mut self, epoch: Epoch) {
self.meta_epoch = epoch;
}
#[inline]
pub fn inner(&self) -> &T {
&self.inner
}
pub fn inner_mut(&mut self) -> &mut T {
&mut self.inner
}
pub fn inner_owned(self) -> T {
self.inner
}
}
impl<T> Deref for DualEpochCounter<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> DerefMut for DualEpochCounter<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl<T> From<T> for DualEpochCounter<T> {
fn from(inner: T) -> Self {
Self::new(inner)
}
}
/// Epoch Map with separate mapping
#[derive(Debug, Default)]
pub struct DualEpochMap<K, V> {
epoch: EpochCounter<()>,
fence: EpochCounter<()>, // last changes
values: HashMap<K, DualEpochCounter<V>>,
deleted: Vec<DualEpochCounter<V>>,
}
impl<K, V> Deref for DualEpochMap<K, V> {
type Target = HashMap<K, DualEpochCounter<V>>;
fn deref(&self) -> &Self::Target {
&self.values
}
}
impl<K, V> DerefMut for DualEpochMap<K, V> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.values
}
}
impl<K, V> DualEpochMap<K, V> {
pub fn increment_epoch(&mut self) {
self.epoch.increment();
}
pub fn decrement_epoch(&mut self) {
self.epoch.decrement();
}
pub fn epoch(&self) -> Epoch {
self.epoch.epoch()
}
}
impl<K, V> DualEpochMap<K, V>
where
V: DualDiff,
K: Eq + Hash,
{
pub fn new() -> Self {
Self::new_with_map(HashMap::new())
}
pub fn new_with_map(values: HashMap<K, DualEpochCounter<V>>) -> Self {
Self {
epoch: EpochCounter::default(),
fence: EpochCounter::default(),
values,
deleted: vec![],
}
}
/// updates the metadata if it is different from existing value
// if this return some then it means replace
// otherwise change occurred
pub fn update(&mut self, key: K, new_value: V) -> Option<ChangeFlag>
where
K: Clone,
{
let mut new_value = DualEpochCounter::new(new_value);
let current_epoch = self.epoch.epoch();
trace!(current_epoch, "updating");
// check each spec and status
if let Some(existing_value) = self.values.get_mut(&key) {
let diff = existing_value.diff(new_value.inner());
trace!("existing diff: {:#?}", diff);
if !diff.has_no_changes() {
new_value.copy_epoch(existing_value);
if diff.spec {
new_value.set_spec_epoch(current_epoch);
}
if diff.status {
new_value.set_status_epoch(current_epoch);
}
if diff.meta {
new_value.set_meta_epoch(current_epoch);
}
*existing_value = new_value;
}
Some(diff)
} else {
// doesn't exist, so everything is new
new_value.set_epoch(current_epoch);
self.values.insert(key, new_value);
None
}
}
/// remove existing value
/// if successful, remove are added to history
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<DualEpochCounter<V>>
where
K: Borrow<Q>,
Q: Hash + Eq,
V: Clone,
{
if let Some((_, mut old_value)) = self.values.remove_entry(k) {
old_value.set_epoch(self.epoch.epoch());
self.deleted.push(old_value.clone());
Some(old_value)
} else {
None
}
}
/// fence history to current epoch,
/// older before fence will be lost
pub fn mark_fence(&mut self) {
self.deleted = vec![];
self.fence = self.epoch.clone();
}
}
impl<K, V> DualEpochMap<K, V>
where
K: Clone,
{
pub fn clone_keys(&self) -> Vec<K> {
self.keys().cloned().collect()
}
}
impl<K, V> DualEpochMap<K, V>
where
V: Clone,
K: Clone,
{
pub fn clone_values(&self) -> Vec<V> {
self.values().cloned().map(|c| c.inner_owned()).collect()
}
/// find all spec changes given epoch
/// if epoch is before fence, return full changes with epoch,
/// otherwise return delta changes
/// user should keep that epoch and do subsequent changes
pub fn spec_changes_since<E>(&self, epoch_value: E) -> EpochChanges<V>
where
Epoch: From<E>,
{
let epoch = epoch_value.into();
self.changes_since_with_filter(epoch, &SPEC_FILTER)
}
/// find all status changes
pub fn status_changes_since<E>(&self, epoch_value: E) -> EpochChanges<V>
where
Epoch: From<E>,
{
let epoch = epoch_value.into();
self.changes_since_with_filter(epoch, &STATUS_FILTER)
}
pub fn meta_changes_since<E>(&self, epoch_value: E) -> EpochChanges<V>
where
Epoch: From<E>,
{
let epoch = epoch_value.into();
self.changes_since_with_filter(epoch, &META_FILTER)
}
/// all changes (spec and status) since epoch
pub fn changes_since<E>(&self, epoch_value: E) -> EpochChanges<V>
where
Epoch: From<E>,
{
let epoch = epoch_value.into();
self.changes_since_with_filter(epoch, &FULL_FILTER)
}
/// find all status changes, only updates are accounted for
pub fn changes_since_with_filter(&self, epoch: Epoch, filter: &ChangeFlag) -> EpochChanges<V> {
if epoch < self.fence.epoch() {
return EpochChanges::new(
self.epoch.epoch(),
EpochDeltaChanges::SyncAll(self.clone_values()),
);
}
if epoch == self.epoch() {
return EpochChanges::new(self.epoch.epoch(), EpochDeltaChanges::empty());
}
let updates: Vec<V> = self
.values()
.filter_map(|v| {
if filter.spec && v.spec_epoch > epoch
|| filter.status && v.status_epoch > epoch
|| filter.meta && v.meta_epoch > epoch
{
Some(v.inner().clone())
} else {
None
}
})
.collect();
let deletes = self
.deleted
.iter()
.filter_map(|v| {
if filter.spec && v.spec_epoch > epoch
|| filter.status && v.status_epoch > epoch
|| filter.meta && v.meta_epoch > epoch
{
Some(v.inner().clone())
} else {
None
}
})
.collect();
EpochChanges::new(
self.epoch.epoch(),
EpochDeltaChanges::Changes((updates, deletes)),
)
}
}
#[cfg(test)]
mod test {
use crate::test_fixture::{DefaultTest, TestEpochMap};
use super::ChangeFlag;
#[test]
fn test_metadata_changes() {
let full_change = ChangeFlag::all();
assert!(full_change.has_full_change());
assert!(!full_change.has_no_changes());
let no_change = ChangeFlag::no_change();
assert!(no_change.has_no_changes());
assert!(!no_change.has_full_change());
}
#[test]
fn test_epoch_map_empty() {
let map = TestEpochMap::new();
assert_eq!(map.epoch(), 0);
}
#[test]
fn test_epoch_map_update_simple() {
let mut map = TestEpochMap::new();
// increase epoch
// epoch must be increased before any write occur manually here
// in the store, this is done automatically but this is low level interface
map.increment_epoch();
let test1 = DefaultTest::with_key("t1");
assert!(map.update(test1.key_owned(), test1).is_none()); // new
assert_eq!(map.epoch(), 1);
// test with before base epoch
{
let spec_changes = map.spec_changes_since(-1);
assert_eq!(*spec_changes.current_epoch(), 1); // current epoch is 1
assert!(spec_changes.is_sync_all());
let (updates, deletes) = spec_changes.parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let status_changes = map.status_changes_since(-1);
assert_eq!(*status_changes.current_epoch(), 1); // current epoch is 1
assert!(status_changes.is_sync_all());
let (updates2, deletes2) = status_changes.parts();
assert_eq!(updates2.len(), 1);
assert_eq!(deletes2.len(), 0);
let meta_changes = map.meta_changes_since(-1);
assert_eq!(*meta_changes.current_epoch(), 1); // current epoch is 1
assert!(meta_changes.is_sync_all());
let (updates2, deletes2) = meta_changes.parts();
assert_eq!(updates2.len(), 1);
assert_eq!(deletes2.len(), 0);
let any_change = map.changes_since(-1);
assert_eq!(*any_change.current_epoch(), 1);
assert!(any_change.is_sync_all());
let (updates2, deletes2) = any_change.parts();
assert_eq!(updates2.len(), 1);
assert_eq!(deletes2.len(), 0);
}
// test with current epoch, this return just 1 changes
{
let spec_changes = map.spec_changes_since(0);
assert_eq!(*spec_changes.current_epoch(), 1); // current epoch is 1
assert!(!spec_changes.is_sync_all()); // this is only delta
let (updates, deletes) = spec_changes.parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let status_changes = map.status_changes_since(0);
assert_eq!(*status_changes.current_epoch(), 1); // current epoch is 1
assert!(!status_changes.is_sync_all()); // this is only delta
let (updates, deletes) = status_changes.parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let meta_changes = map.meta_changes_since(0);
assert_eq!(*meta_changes.current_epoch(), 1); // current epoch is 1
assert!(!meta_changes.is_sync_all()); // this is only delta
let (updates, deletes) = meta_changes.parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let any_change = map.changes_since(0);
assert_eq!(*any_change.current_epoch(), 1);
assert!(!any_change.is_sync_all());
let (updates2, deletes2) = any_change.parts();
assert_eq!(updates2.len(), 1);
assert_eq!(deletes2.len(), 0);
}
// test with current epoch which should return 1 single change as well
{
let spec_changes = map.spec_changes_since(1);
assert_eq!(*spec_changes.current_epoch(), 1); // current epoch is 1
assert!(!spec_changes.is_sync_all()); // this is only delta
let (updates, deletes) = spec_changes.parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let status_changes = map.status_changes_since(1);
assert_eq!(*status_changes.current_epoch(), 1); // current epoch is 1
assert!(!status_changes.is_sync_all()); // this is only delta
let (updates, deletes) = status_changes.parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let meta_changes = map.meta_changes_since(1);
assert_eq!(*meta_changes.current_epoch(), 1); // current epoch is 1
assert!(!meta_changes.is_sync_all()); // this is only delta
let (updates, deletes) = meta_changes.parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let any_change = map.changes_since(1);
assert_eq!(*any_change.current_epoch(), 1);
assert!(!any_change.is_sync_all());
let (updates2, deletes2) = any_change.parts();
assert_eq!(updates2.len(), 0);
assert_eq!(deletes2.len(), 0);
}
}
#[test]
fn test_epoch_map_update_status() {
let mut map = TestEpochMap::new();
let test1 = DefaultTest::with_key("t1");
let mut test2 = test1.clone();
test2.status.up = true;
// first epoch
map.increment_epoch();
assert_eq!(test1.ctx().item().rev, 0);
assert!(map.update(test1.key_owned(), test1).is_none());
map.increment_epoch();
// only update status
let changes = map
.update(test2.key_owned(), test2.next_rev())
.expect("update");
assert!(!changes.spec);
assert!(changes.status);
// update the
assert_eq!(map.epoch(), 2);
// test with base epoch, this should return a single changes for spec and status
{
let (updates, deletes) = map.spec_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
}
// test with middle epoch, this should just return status
{
let (updates, deletes) = map.spec_changes_since(1).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(1).parts();
assert_eq!(updates.len(), 1); // rev has changed
assert_eq!(deletes.len(), 0);
}
{
let (updates, deletes) = map.spec_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
}
}
#[test]
fn test_epoch_map_update_spec() {
let mut map = TestEpochMap::new();
let test1 = DefaultTest::with_key("t1");
let mut test2 = test1.clone();
test2.spec.replica = 20;
// first epoch
map.increment_epoch();
// there is no test 1 prior, so update should not occur
assert!(map.update(test1.key_owned(), test1).is_none());
map.increment_epoch();
let changes = map
.update(test2.key_owned(), test2.next_rev())
.expect("update");
assert!(changes.spec);
assert!(!changes.status);
// update the
assert_eq!(map.epoch(), 2);
// test with base epoch, this should return a single changes for spec and status
{
let (updates, deletes) = map.spec_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
}
// test with middle epoch, this should just return status
{
let (updates, deletes) = map.spec_changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(1).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
}
{
let (updates, deletes) = map.spec_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
}
}
#[test]
fn test_epoch_map_update_meta() {
let mut map = TestEpochMap::new();
let test1 = DefaultTest::with_key("t1");
let mut test2 = test1.clone();
test2.ctx.item_mut().comment = "test".to_owned();
map.increment_epoch();
assert!(map.update(test1.key_owned(), test1).is_none());
// without rev update, no updates
assert!(map
.update(test2.key_owned(), test2.clone())
.expect("update")
.has_no_changes());
map.increment_epoch();
let changes = map
.update(test2.key_owned(), test2.next_rev())
.expect("update");
assert!(!changes.spec);
assert!(!changes.status);
assert!(changes.meta);
// update the
assert_eq!(map.epoch(), 2);
// test with base epoch, this should return a single changes for spec and status
{
let (updates, deletes) = map.spec_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(0).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
}
// changes with meta changes only
{
let (updates, deletes) = map.spec_changes_since(1).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(1).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(1).parts();
assert_eq!(updates.len(), 1);
assert_eq!(deletes.len(), 0);
}
{
let (updates, deletes) = map.spec_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.status_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
let (updates, deletes) = map.meta_changes_since(2).parts();
assert_eq!(updates.len(), 0);
assert_eq!(deletes.len(), 0);
}
}
}
| 30.11352 | 99 | 0.549367 |
0a8cae09b2e800816ef8cb5a9676e416cc86e489 | 368 | use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;
use dotenv::dotenv;
use std::env;
pub fn establish_connection() -> SqliteConnection {
dotenv().ok();
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
SqliteConnection::establish(&database_url)
.expect(&format!("Error connecting to {}", database_url))
}
| 28.307692 | 83 | 0.701087 |
875bcc91c4c81c4df065dcd76e0b19b5b49adc7b | 11,210 | use std::sync::Arc;
use rusqlite::{Connection, NO_PARAMS, OpenFlags, ToSql, params};
use juniper::{FieldResult, RootNode, graphql_object};
use hyper::{
service::{make_service_fn, service_fn},
Body, Method, Response, Server, StatusCode,
};
use anyhow::{Context, Result, bail};
struct Database {
conn: Connection
}
unsafe impl Sync for Database {}
impl juniper::Context for Database {}
impl Database {
fn new() -> Result<Self> {
Ok(Self {
conn: Self::new_connection().context("Failed to set up database")?
})
}
fn new_connection() -> Result<Connection> {
let conn = Connection::open_with_flags(
"store.db",
OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_NO_MUTEX)?;
let mode: String = conn.prepare("pragma journal_mode=WAL")?.query(NO_PARAMS)?.next()?.unwrap().get(0)?;
println!("mode: {}", mode);
// let _: i32 = conn.prepare("pragma mmap_size=268435456;")?.query(NO_PARAMS)?.next()?.unwrap().get(0)?;
conn.execute(
"create table if not exists events (
id integer primary key,
store text not null,
timestamp text not null,
data text not null,
blob blob
);
",
NO_PARAMS,
)?;
conn.execute("create index if not exists idx_events_store on events (store)", NO_PARAMS)?;
conn.execute("create index if not exists idx_events_timestamp on events (timestamp)", NO_PARAMS)?;
Ok(conn)
}
fn _add_index_for_json_key(&self, key: &str) -> Result<()> {
for ch in key.chars() {
if !ch.is_ascii_alphanumeric() && ch != '_' {
bail!("JSON key must be alphanumeric");
}
}
self.conn.execute(
format!("create index if not exists idx_events_{0} on events (json_extract(data, '$.{0}'))", key).as_str(),
NO_PARAMS,
).context("Failed to create index for a JSON key")?;
Ok(())
}
fn add_event(&self, store: &str, data: &str, blob: Option<Vec<u8>>) -> Result<()> {
self.conn.execute(
"insert into events (store, timestamp, data, blob) values (
?1, strftime('%Y-%m-%d %H:%M:%f', 'now'), json(?2), ?3)",
params![store, data, blob],
).context("Failed to add an event")?;
Ok(())
}
fn delete_store(&self, name: &str) -> Result<()> {
self.conn.execute(
"delete from events where store == ?1",
params!(name),
).context("Failed to delete store")?;
Ok(())
}
fn count_rows_in_store(&self, store: &str) -> Result<i64> {
let count: i64 = self.conn
.prepare("select count(*) from events where store == ?1")?
.query(params![store])?
.next()?.expect("Failed to count rows in store").get(0)?;
Ok(count)
}
fn events(&self, store: Option<String>, before: Option<String>, after: Option<String>, limit: Option<i32>, offset: i32, reverse: bool) -> Result<Vec<Event>> {
let limit = limit.unwrap_or(-1);
let mut where_clauses = vec![];
let mut params = vec![
(":limit", &limit as &dyn ToSql),
(":offset", &offset as &dyn ToSql),
];
if store.is_some() {
where_clauses.push("store == :store");
params.push((":store", &store as &dyn ToSql));
}
if before.is_some() {
where_clauses.push("timestamp < strftime('%Y-%m-%d %H:%M:%f', :before)");
params.push((":before", &before as &dyn ToSql));
}
if after.is_some() {
where_clauses.push("timestamp > strftime('%Y-%m-%d %H:%M:%f', :after)");
params.push((":after", &after as &dyn ToSql));
}
let joined_where = if where_clauses.is_empty() {
"".to_string()
} else {
format!(" where {}", where_clauses.join(" and "))
};
let statement_string = format!("
select * from events
{}
order by id {}
limit :limit offset :offset",
joined_where,
if reverse { "desc" } else { "asc" });
let mut statement = self.conn.prepare(&statement_string)?;
let mut results = statement.query_named(params.as_slice())?;
eprintln!("Executing {:?}", statement_string);
let mut res = Vec::new();
while let Some(row) = results.next()? {
let event = Event {
id: row.get(0)?,
store: Store::new(row.get(1)?),
timestamp: row.get(2)?,
data: row.get(3)?,
blob: row.get(4)?,
};
res.push(event);
}
Ok(res)
}
fn stores(&self, limit: Option<i32>, offset: i32) -> Result<Vec<Store>> {
let mut statement = self.conn.prepare("select distinct store from events limit ?1 offset ?2")?;
eprintln!("Executing {:?}", statement);
let mut results = statement.query(params![limit.unwrap_or(-1), offset])?;
let mut res = Vec::new();
while let Some(row) = results.next()? {
let store_name: String = row.get(0)?;
res.push(Store::new(store_name));
}
Ok(res)
}
fn begin_transaction(&self) -> Result<()> {
self.conn.execute("begin transaction", NO_PARAMS)?;
Ok(())
}
fn commit_transaction(&self) -> Result<()> {
self.conn.execute("commit transaction", NO_PARAMS)?;
Ok(())
}
fn vacuum(&self) -> Result<()> {
self.conn.execute("vacuum", NO_PARAMS)?;
Ok(())
}
}
#[derive(Debug, Clone)]
struct Store {
name: String,
}
impl Store {
fn new(name: String) -> Self {
Self {
name
}
}
}
#[graphql_object(Context = Database)]
impl Store {
fn name(&self) -> &str {
self.name.as_str()
}
fn event_count(&self, context: &Database) -> FieldResult<String> {
Ok(context.count_rows_in_store(&self.name)?.to_string())
}
#[graphql(arguments(after(default = None), before(default = None), limit(default = None), offset(default = 0), reverse(default = false)))]
fn events(&self, after: Option<String>, before: Option<String>, limit: Option<i32>, offset: i32, reverse: bool, context: &Database) -> FieldResult<Vec<Event>> {
Ok(context.events(Some(self.name.clone()), before, after, limit, offset, reverse)?)
}
}
#[derive(Debug)]
struct Mutations {
}
impl Mutations {
fn new() -> Self {
Self { }
}
}
#[graphql_object(Context = Database)]
impl Mutations {
fn delete_store(&self, store: String, context: &Database) -> FieldResult<String> {
context.delete_store(&store)?;
Ok(format!("Successfully deleted store '{}'", store))
}
#[graphql(arguments(blob(default = None)))]
fn add_event(&self, store: String, data: String, blob: Option<String>, context: &Database) -> FieldResult<String> {
let blob = match blob {
Some(encoded) => Some(base64::decode(encoded)?),
None => None,
};
context.add_event(store.as_str(), data.as_str(), blob)?;
Ok("Successfully added an event".to_string())
}
fn vacuum(&self, context: &Database) -> FieldResult<String> {
context.commit_transaction()?;
context.vacuum()?;
context.begin_transaction()?;
Ok("Successfully vacuumed the database".to_string())
}
}
#[derive(Debug)]
struct Stores {}
impl Stores {
fn new() -> Self {
Self { }
}
}
#[graphql_object(Context = Database)]
impl Stores {
#[graphql(arguments(limit(default = None), offset(default = 0)))]
fn stores(&self, limit: Option<i32>, offset: i32, context: &Database) -> FieldResult<Vec<Store>> {
Ok(context.stores(limit, offset)?)
}
fn store(&self, name: String) -> Store {
Store::new(name)
}
#[graphql(arguments(store(default = None), after(default = None), before(default = None), limit(default = None), offset(default = 0), reverse(default = false)))]
fn events(&self, store: Option<String>, after: Option<String>, before: Option<String>, limit: Option<i32>, offset: i32, reverse: bool, context: &Database) -> FieldResult<Vec<Event>> {
Ok(context.events(store, before, after, limit, offset, reverse)?)
}
}
#[derive(Debug)]
struct Event {
id: i64,
store: Store,
timestamp: String,
data: String,
blob: Option<Vec<u8>>,
}
#[graphql_object(Context = Database)]
impl Event {
fn id(&self) -> String {
self.id.to_string()
}
fn store(&self) -> Store {
self.store.clone()
}
fn timestamp(&self) -> &str {
self.timestamp.as_str()
}
fn data(&self) -> &str {
self.data.as_str()
}
fn has_blob(&self) -> bool {
self.blob.is_some()
}
fn blob_as_base64(&self) -> Option<String> {
if let Some(blob) = &self.blob {
Some(base64::encode(blob))
} else {
None
}
}
}
fn _add_test_events(count: usize) -> Result<()> {
let db = Database::new()?;
db.begin_transaction()?;
for i in 0..count {
if i % 100000 == 0 {
println!(".");
}
db.add_event("test", "{}", None)?;
}
db.commit_transaction()?;
Ok(())
}
#[tokio::main]
async fn main() -> Result<()> {
//_add_test_events(10_000_000)?;
let root_node = Arc::new(RootNode::new(Stores::new(), Mutations::new(), juniper::EmptySubscription::new()));
let _db = Arc::new(Database::new()?);
let juniper_service = make_service_fn(move |_| {
let root_node = root_node.clone();
async {
Ok::<_, hyper::Error>(service_fn(move |req| {
let root_node = root_node.clone();
async {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") => juniper_hyper::playground("/graphql", None).await,
(&Method::GET, "/graphql") | (&Method::POST, "/graphql") => {
let ctx = Arc::new(Database::new().unwrap());
ctx.begin_transaction().unwrap();
let res = juniper_hyper::graphql(root_node, ctx.clone(), req).await;
ctx.commit_transaction().unwrap();
res
}
_ => {
let mut response = Response::new(Body::empty());
*response.status_mut() = StatusCode::NOT_FOUND;
Ok(response)
}
}
}
}))
}
});
let addr = ([127, 0, 0, 1], 3000).into();
let server = Server::bind(&addr).serve(juniper_service);
println!("Listening on http://{}", addr);
if let Err(e) = server.await {
eprintln!("server error: {}", e)
}
Ok(())
}
| 31.937322 | 187 | 0.533988 |
143a41631df89a660c5693c41fa322b64a3b7dc6 | 3,841 | #![allow(clippy::eval_order_dependence)] // Needed when using `syn::parenthesized!`.
mod attributes;
mod children;
mod component;
mod element;
mod splice;
use attributes::*;
use children::*;
use component::*;
use element::*;
use splice::*;
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::ext::IdentExt;
use syn::parse::{Parse, ParseStream};
use syn::token::Paren;
use syn::{Ident, LitStr, Result, Token};
pub enum HtmlType {
Component,
Element,
Text,
Splice,
}
pub enum HtmlTree {
Component(Component),
Element(Element),
Text(LitStr),
Splice(Splice),
}
impl HtmlTree {
fn peek_type(input: ParseStream) -> Option<HtmlType> {
let input = input.fork(); // do not affect original ParseStream
if input.peek(LitStr) {
Some(HtmlType::Text)
} else if input.peek(Paren) {
Some(HtmlType::Splice)
} else if input.peek(Token![::]) {
Some(HtmlType::Component)
} else if input.peek(Ident::peek_any) {
let ident: Ident = input.call(Ident::parse_any).ok()?;
let ident = ident.to_string();
if ident.chars().next().unwrap().is_ascii_uppercase() || input.peek(Token![::]) {
Some(HtmlType::Component)
} else {
Some(HtmlType::Element)
}
} else {
None
}
}
}
impl Parse for HtmlTree {
fn parse(input: ParseStream) -> Result<Self> {
let html_type = match Self::peek_type(input) {
Some(html_type) => html_type,
None => return Err(input.error("expected a valid HTML node")),
};
Ok(match html_type {
HtmlType::Component => Self::Component(input.parse()?),
HtmlType::Element => Self::Element(input.parse()?),
HtmlType::Text => Self::Text(input.parse()?),
HtmlType::Splice => Self::Splice(input.parse()?),
})
}
}
impl ToTokens for HtmlTree {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
let quoted = match self {
Self::Component(component) => quote! {
#component
},
Self::Element(element) => quote! {
::sycamore::template::Template::new_node(#element)
},
Self::Text(text) => quote! {
::sycamore::template::Template::new_node(
::sycamore::generic_node::GenericNode::text_node(#text),
)
},
Self::Splice(splice) => quote! {
::sycamore::template::Template::new_dyn(move ||
::sycamore::template::IntoTemplate::create(&#splice)
)
},
};
tokens.extend(quoted);
}
}
pub struct HtmlRoot {
pub children: Vec<HtmlTree>,
}
impl Parse for HtmlRoot {
fn parse(input: ParseStream) -> Result<Self> {
let mut children = Vec::new();
while !input.is_empty() {
children.push(input.parse()?);
}
Ok(Self { children })
}
}
impl ToTokens for HtmlRoot {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
let quoted = match self.children.as_slice() {
[] => quote! {
::sycamore::template::Template::empty()
},
[node] => node.to_token_stream(),
nodes => quote! {
::sycamore::template::Template::new_fragment({
let mut children = ::std::vec::Vec::new();
#(
children.push(#nodes);
)*
children
})
},
};
tokens.extend(quoted);
}
}
pub fn template_impl(component: HtmlRoot) -> TokenStream {
component.to_token_stream()
}
| 26.86014 | 93 | 0.530331 |
1d913d5380629c9e6fbb6e2b593d0a3629ec48de | 65,479 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>A mapping of a query attached to a resource group that determines the AWS resources
/// that are members of the group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupQuery {
/// <p>The name of the resource group that is associated with the specified resource
/// query.</p>
pub group_name: std::option::Option<std::string::String>,
/// <p>The resource query that determines which AWS resources are members of the associated
/// resource group.</p>
pub resource_query: std::option::Option<crate::model::ResourceQuery>,
}
impl std::fmt::Debug for GroupQuery {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupQuery");
formatter.field("group_name", &self.group_name);
formatter.field("resource_query", &self.resource_query);
formatter.finish()
}
}
/// See [`GroupQuery`](crate::model::GroupQuery)
pub mod group_query {
/// A builder for [`GroupQuery`](crate::model::GroupQuery)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) group_name: std::option::Option<std::string::String>,
pub(crate) resource_query: std::option::Option<crate::model::ResourceQuery>,
}
impl Builder {
/// <p>The name of the resource group that is associated with the specified resource
/// query.</p>
pub fn group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.group_name = Some(input.into());
self
}
pub fn set_group_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.group_name = input;
self
}
/// <p>The resource query that determines which AWS resources are members of the associated
/// resource group.</p>
pub fn resource_query(mut self, input: crate::model::ResourceQuery) -> Self {
self.resource_query = Some(input);
self
}
pub fn set_resource_query(
mut self,
input: std::option::Option<crate::model::ResourceQuery>,
) -> Self {
self.resource_query = input;
self
}
/// Consumes the builder and constructs a [`GroupQuery`](crate::model::GroupQuery)
pub fn build(self) -> crate::model::GroupQuery {
crate::model::GroupQuery {
group_name: self.group_name,
resource_query: self.resource_query,
}
}
}
}
impl GroupQuery {
/// Creates a new builder-style object to manufacture [`GroupQuery`](crate::model::GroupQuery)
pub fn builder() -> crate::model::group_query::Builder {
crate::model::group_query::Builder::default()
}
}
/// <p>The query that is used to define a resource group or a search for resources. A query
/// specifies both a query type and a query string as a JSON object. See the examples
/// section for example JSON strings.</p>
/// <p>The examples that follow are shown as standard JSON strings. If you include such a
/// string as a parameter to the AWS CLI or an SDK API, you might need to 'escape' the
/// string into a single line. For example, see the <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters-quoting-strings.html">Quoting
/// strings</a> in the <i>AWS CLI User Guide</i>.</p>
/// <p>
/// <b>Example 1</b>
/// </p>
/// <p>The following generic example shows a resource query JSON string that includes only
/// resources that meet the following criteria:</p>
/// <ul>
/// <li>
/// <p>The resource type must be either <code>resource_type1</code> or
/// <code>resource_type2</code>.</p>
/// </li>
/// <li>
/// <p>The resource must have a tag <code>Key1</code> with a value of either
/// <code>ValueA</code> or <code>ValueB</code>.</p>
/// </li>
/// <li>
/// <p>The resource must have a tag <code>Key2</code> with a value of either
/// <code>ValueC</code> or <code>ValueD</code>.</p>
/// </li>
/// </ul>
/// <p>
/// <code>{
/// "Type": "TAG_FILTERS_1_0",
/// "Query": {
/// "ResourceTypeFilters": [ "resource_type1", "resource_type2"],
/// "TagFilters": [
/// {
/// "Key": "Key1",
/// "Values": ["ValueA","ValueB"]
/// },
/// {
/// "Key":"Key2",
/// "Values":["ValueC","ValueD"]
/// }
/// ]
/// }
/// }</code>
/// </p>
/// <p>This has the equivalent "shortcut" syntax of the following:</p>
/// <p>
/// <code>{
/// "Type": "TAG_FILTERS_1_0",
/// "Query": {
/// "ResourceTypeFilters": [ "resource_type1", "resource_type2"],
/// "TagFilters": [
/// { "Key1": ["ValueA","ValueB"] },
/// { "Key2": ["ValueC","ValueD"]
/// }
/// ]
/// }
/// }</code>
/// </p>
/// <p>
/// <b>Example 2</b>
/// </p>
/// <p>The following example shows a resource query JSON string that includes only Amazon EC2
/// instances that are tagged <code>Stage</code> with a value of <code>Test</code>.</p>
/// <p>
/// <code>{
/// "Type": "TAG_FILTERS_1_0",
/// "Query": "{
/// "ResourceTypeFilters": "AWS::EC2::Instance",
/// "TagFilters": { "Stage": "Test" }
/// }
/// }</code>
/// </p>
/// <p>
/// <b>Example 3</b>
/// </p>
/// <p>The following example shows a resource query JSON string that includes resource of any
/// supported type as long as it is tagged <code>Stage</code> with a value of
/// <code>Prod</code>.</p>
/// <p>
/// <code>{
/// "Type": "TAG_FILTERS_1_0",
/// "Query": {
/// "ResourceTypeFilters": "AWS::AllSupported",
/// "TagFilters": { "Stage": "Prod" }
/// }
/// }</code>
/// </p>
/// <p>
/// <b>Example 4</b>
/// </p>
/// <p>The following example shows a resource query JSON string that includes only Amazon EC2
/// instances and Amazon S3 buckets that are part of the specified AWS CloudFormation stack.</p>
/// <p>
/// <code>{
/// "Type": "CLOUDFORMATION_STACK_1_0",
/// "Query": {
/// "ResourceTypeFilters": [ "AWS::EC2::Instance", "AWS::S3::Bucket" ],
/// "StackIdentifier": "arn:aws:cloudformation:us-west-2:123456789012:stack/AWStestuseraccount/fb0d5000-aba8-00e8-aa9e-50d5cEXAMPLE"
/// }
/// }</code>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceQuery {
/// <p>The type of the query. You can use the following values:</p>
/// <ul>
/// <li>
/// <p>
/// <i>
/// <code>CLOUDFORMATION_STACK_1_0:</code>
/// </i>Specifies that the
/// <code>Query</code> contains an ARN for a CloudFormation stack.</p>
/// </li>
/// <li>
/// <p>
/// <i>
/// <code>TAG_FILTERS_1_0:</code>
/// </i>Specifies that the
/// <code>Query</code> parameter contains a JSON string that represents a
/// collection of simple tag filters for resource types and tags. The JSON string
/// uses a syntax similar to the <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html">GetResources</a>
/// </code> operation, but uses only the <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-ResourceTypeFilters"> ResourceTypeFilters</a>
/// </code> and <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFiltersTagFilters">TagFilters</a>
/// </code> fields. If you specify more than one tag key,
/// only resources that match all tag keys, and at least one value of each specified
/// tag key, are returned in your query. If you specify more than one value for a
/// tag key, a resource matches the filter if it has a tag key value that matches
/// <i>any</i> of the specified values.</p>
/// <p>For example, consider the following sample query for resources that have two
/// tags, <code>Stage</code> and <code>Version</code>, with two values each:</p>
/// <p>
/// <code>[{"Stage":["Test","Deploy"]},{"Version":["1","2"]}]</code>
/// </p>
/// <p>The results of this query could include the following.</p>
/// <ul>
/// <li>
/// <p>An EC2 instance that has the following two tags:
/// <code>{"Stage":"Deploy"}</code>, and
/// <code>{"Version":"2"}</code>
/// </p>
/// </li>
/// <li>
/// <p>An S3 bucket that has the following two tags:
/// <code>{"Stage":"Test"}</code>, and
/// <code>{"Version":"1"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>The query would not include the following items in the results, however. </p>
/// <ul>
/// <li>
/// <p>An EC2 instance that has only the following tag:
/// <code>{"Stage":"Deploy"}</code>.</p>
/// <p>The instance does not have <b>all</b> of the
/// tag keys specified in the filter, so it is excluded from the
/// results.</p>
/// </li>
/// <li>
/// <p>An RDS database that has the following two tags:
/// <code>{"Stage":"Archived"}</code> and
/// <code>{"Version":"4"}</code>
/// </p>
/// <p>The database has all of the tag keys, but none of those keys has an
/// associated value that matches at least one of the specified values in
/// the filter.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub r#type: std::option::Option<crate::model::QueryType>,
/// <p>The query that defines a group or a search.</p>
pub query: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ResourceQuery {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResourceQuery");
formatter.field("r#type", &self.r#type);
formatter.field("query", &self.query);
formatter.finish()
}
}
/// See [`ResourceQuery`](crate::model::ResourceQuery)
pub mod resource_query {
/// A builder for [`ResourceQuery`](crate::model::ResourceQuery)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) r#type: std::option::Option<crate::model::QueryType>,
pub(crate) query: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The type of the query. You can use the following values:</p>
/// <ul>
/// <li>
/// <p>
/// <i>
/// <code>CLOUDFORMATION_STACK_1_0:</code>
/// </i>Specifies that the
/// <code>Query</code> contains an ARN for a CloudFormation stack.</p>
/// </li>
/// <li>
/// <p>
/// <i>
/// <code>TAG_FILTERS_1_0:</code>
/// </i>Specifies that the
/// <code>Query</code> parameter contains a JSON string that represents a
/// collection of simple tag filters for resource types and tags. The JSON string
/// uses a syntax similar to the <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html">GetResources</a>
/// </code> operation, but uses only the <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-ResourceTypeFilters"> ResourceTypeFilters</a>
/// </code> and <code>
/// <a href="https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFiltersTagFilters">TagFilters</a>
/// </code> fields. If you specify more than one tag key,
/// only resources that match all tag keys, and at least one value of each specified
/// tag key, are returned in your query. If you specify more than one value for a
/// tag key, a resource matches the filter if it has a tag key value that matches
/// <i>any</i> of the specified values.</p>
/// <p>For example, consider the following sample query for resources that have two
/// tags, <code>Stage</code> and <code>Version</code>, with two values each:</p>
/// <p>
/// <code>[{"Stage":["Test","Deploy"]},{"Version":["1","2"]}]</code>
/// </p>
/// <p>The results of this query could include the following.</p>
/// <ul>
/// <li>
/// <p>An EC2 instance that has the following two tags:
/// <code>{"Stage":"Deploy"}</code>, and
/// <code>{"Version":"2"}</code>
/// </p>
/// </li>
/// <li>
/// <p>An S3 bucket that has the following two tags:
/// <code>{"Stage":"Test"}</code>, and
/// <code>{"Version":"1"}</code>
/// </p>
/// </li>
/// </ul>
/// <p>The query would not include the following items in the results, however. </p>
/// <ul>
/// <li>
/// <p>An EC2 instance that has only the following tag:
/// <code>{"Stage":"Deploy"}</code>.</p>
/// <p>The instance does not have <b>all</b> of the
/// tag keys specified in the filter, so it is excluded from the
/// results.</p>
/// </li>
/// <li>
/// <p>An RDS database that has the following two tags:
/// <code>{"Stage":"Archived"}</code> and
/// <code>{"Version":"4"}</code>
/// </p>
/// <p>The database has all of the tag keys, but none of those keys has an
/// associated value that matches at least one of the specified values in
/// the filter.</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
pub fn r#type(mut self, input: crate::model::QueryType) -> Self {
self.r#type = Some(input);
self
}
pub fn set_type(mut self, input: std::option::Option<crate::model::QueryType>) -> Self {
self.r#type = input;
self
}
/// <p>The query that defines a group or a search.</p>
pub fn query(mut self, input: impl Into<std::string::String>) -> Self {
self.query = Some(input.into());
self
}
pub fn set_query(mut self, input: std::option::Option<std::string::String>) -> Self {
self.query = input;
self
}
/// Consumes the builder and constructs a [`ResourceQuery`](crate::model::ResourceQuery)
pub fn build(self) -> crate::model::ResourceQuery {
crate::model::ResourceQuery {
r#type: self.r#type,
query: self.query,
}
}
}
}
impl ResourceQuery {
/// Creates a new builder-style object to manufacture [`ResourceQuery`](crate::model::ResourceQuery)
pub fn builder() -> crate::model::resource_query::Builder {
crate::model::resource_query::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum QueryType {
CloudformationStack10,
TagFilters10,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for QueryType {
fn from(s: &str) -> Self {
match s {
"CLOUDFORMATION_STACK_1_0" => QueryType::CloudformationStack10,
"TAG_FILTERS_1_0" => QueryType::TagFilters10,
other => QueryType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for QueryType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(QueryType::from(s))
}
}
impl QueryType {
pub fn as_str(&self) -> &str {
match self {
QueryType::CloudformationStack10 => "CLOUDFORMATION_STACK_1_0",
QueryType::TagFilters10 => "TAG_FILTERS_1_0",
QueryType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"]
}
}
impl AsRef<str> for QueryType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A resource group that contains AWS resources. You can assign resources to the group
/// by associating either of the following elements with the group:</p>
/// <ul>
/// <li>
/// <p>
/// <a>ResourceQuery</a> - Use a resource query to specify a set of tag
/// keys and values. All resources in the same AWS Region and AWS account that
/// have those keys with the same values are included in the group. You can add a
/// resource query when you create the group, or later by using the <a>PutGroupConfiguration</a> operation.</p>
/// </li>
/// <li>
/// <p>
/// <a>GroupConfiguration</a> - Use a service configuration to
/// associate the group with an AWS service. The configuration specifies which
/// resource types can be included in the group.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Group {
/// <p>The ARN of the resource group.</p>
pub group_arn: std::option::Option<std::string::String>,
/// <p>The name of the resource group.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The description of the resource group.</p>
pub description: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for Group {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Group");
formatter.field("group_arn", &self.group_arn);
formatter.field("name", &self.name);
formatter.field("description", &self.description);
formatter.finish()
}
}
/// See [`Group`](crate::model::Group)
pub mod group {
/// A builder for [`Group`](crate::model::Group)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) group_arn: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN of the resource group.</p>
pub fn group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.group_arn = Some(input.into());
self
}
pub fn set_group_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.group_arn = input;
self
}
/// <p>The name of the resource group.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The description of the resource group.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// Consumes the builder and constructs a [`Group`](crate::model::Group)
pub fn build(self) -> crate::model::Group {
crate::model::Group {
group_arn: self.group_arn,
name: self.name,
description: self.description,
}
}
}
}
impl Group {
/// Creates a new builder-style object to manufacture [`Group`](crate::model::Group)
pub fn builder() -> crate::model::group::Builder {
crate::model::group::Builder::default()
}
}
/// <p>A structure that identifies a resource that is currently pending addition to the group
/// as a member. Adding a resource to a resource group happens asynchronously as a
/// background task and this one isn't completed yet.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PendingResource {
/// <p>The Amazon resource name (ARN) of the resource that's in a pending state.</p>
pub resource_arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PendingResource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PendingResource");
formatter.field("resource_arn", &self.resource_arn);
formatter.finish()
}
}
/// See [`PendingResource`](crate::model::PendingResource)
pub mod pending_resource {
/// A builder for [`PendingResource`](crate::model::PendingResource)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The Amazon resource name (ARN) of the resource that's in a pending state.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// Consumes the builder and constructs a [`PendingResource`](crate::model::PendingResource)
pub fn build(self) -> crate::model::PendingResource {
crate::model::PendingResource {
resource_arn: self.resource_arn,
}
}
}
}
impl PendingResource {
/// Creates a new builder-style object to manufacture [`PendingResource`](crate::model::PendingResource)
pub fn builder() -> crate::model::pending_resource::Builder {
crate::model::pending_resource::Builder::default()
}
}
/// <p>A resource that failed to be added to or removed from a group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FailedResource {
/// <p>The ARN of the resource that failed to be added or removed.</p>
pub resource_arn: std::option::Option<std::string::String>,
/// <p>The error message text associated with the failure.</p>
pub error_message: std::option::Option<std::string::String>,
/// <p>The error code associated with the failure.</p>
pub error_code: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for FailedResource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FailedResource");
formatter.field("resource_arn", &self.resource_arn);
formatter.field("error_message", &self.error_message);
formatter.field("error_code", &self.error_code);
formatter.finish()
}
}
/// See [`FailedResource`](crate::model::FailedResource)
pub mod failed_resource {
/// A builder for [`FailedResource`](crate::model::FailedResource)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
pub(crate) error_message: std::option::Option<std::string::String>,
pub(crate) error_code: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN of the resource that failed to be added or removed.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// <p>The error message text associated with the failure.</p>
pub fn error_message(mut self, input: impl Into<std::string::String>) -> Self {
self.error_message = Some(input.into());
self
}
pub fn set_error_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.error_message = input;
self
}
/// <p>The error code associated with the failure.</p>
pub fn error_code(mut self, input: impl Into<std::string::String>) -> Self {
self.error_code = Some(input.into());
self
}
pub fn set_error_code(mut self, input: std::option::Option<std::string::String>) -> Self {
self.error_code = input;
self
}
/// Consumes the builder and constructs a [`FailedResource`](crate::model::FailedResource)
pub fn build(self) -> crate::model::FailedResource {
crate::model::FailedResource {
resource_arn: self.resource_arn,
error_message: self.error_message,
error_code: self.error_code,
}
}
}
}
impl FailedResource {
/// Creates a new builder-style object to manufacture [`FailedResource`](crate::model::FailedResource)
pub fn builder() -> crate::model::failed_resource::Builder {
crate::model::failed_resource::Builder::default()
}
}
/// <p>A two-part error structure that can occur in <code>ListGroupResources</code> or
/// <code>SearchResources</code> operations on CloudFormation stack-based queries. The error
/// occurs if the CloudFormation stack on which the query is based either does not exist, or has a
/// status that renders the stack inactive. A <code>QueryError</code> occurrence does not
/// necessarily mean that AWS Resource Groups could not complete the operation, but the resulting
/// group might have no member resources.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct QueryError {
/// <p>Possible values are <code>CLOUDFORMATION_STACK_INACTIVE</code> and
/// <code>CLOUDFORMATION_STACK_NOT_EXISTING</code>.</p>
pub error_code: std::option::Option<crate::model::QueryErrorCode>,
/// <p>A message that explains the <code>ErrorCode</code> value. Messages might state that
/// the specified CloudFormation stack does not exist (or no longer exists). For
/// <code>CLOUDFORMATION_STACK_INACTIVE</code>, the message typically states that the
/// CloudFormation stack has a status that is not (or no longer) active, such as
/// <code>CREATE_FAILED</code>.</p>
pub message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for QueryError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("QueryError");
formatter.field("error_code", &self.error_code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`QueryError`](crate::model::QueryError)
pub mod query_error {
/// A builder for [`QueryError`](crate::model::QueryError)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) error_code: std::option::Option<crate::model::QueryErrorCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Possible values are <code>CLOUDFORMATION_STACK_INACTIVE</code> and
/// <code>CLOUDFORMATION_STACK_NOT_EXISTING</code>.</p>
pub fn error_code(mut self, input: crate::model::QueryErrorCode) -> Self {
self.error_code = Some(input);
self
}
pub fn set_error_code(
mut self,
input: std::option::Option<crate::model::QueryErrorCode>,
) -> Self {
self.error_code = input;
self
}
/// <p>A message that explains the <code>ErrorCode</code> value. Messages might state that
/// the specified CloudFormation stack does not exist (or no longer exists). For
/// <code>CLOUDFORMATION_STACK_INACTIVE</code>, the message typically states that the
/// CloudFormation stack has a status that is not (or no longer) active, such as
/// <code>CREATE_FAILED</code>.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`QueryError`](crate::model::QueryError)
pub fn build(self) -> crate::model::QueryError {
crate::model::QueryError {
error_code: self.error_code,
message: self.message,
}
}
}
}
impl QueryError {
/// Creates a new builder-style object to manufacture [`QueryError`](crate::model::QueryError)
pub fn builder() -> crate::model::query_error::Builder {
crate::model::query_error::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum QueryErrorCode {
CloudformationStackInactive,
CloudformationStackNotExisting,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for QueryErrorCode {
fn from(s: &str) -> Self {
match s {
"CLOUDFORMATION_STACK_INACTIVE" => QueryErrorCode::CloudformationStackInactive,
"CLOUDFORMATION_STACK_NOT_EXISTING" => QueryErrorCode::CloudformationStackNotExisting,
other => QueryErrorCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for QueryErrorCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(QueryErrorCode::from(s))
}
}
impl QueryErrorCode {
pub fn as_str(&self) -> &str {
match self {
QueryErrorCode::CloudformationStackInactive => "CLOUDFORMATION_STACK_INACTIVE",
QueryErrorCode::CloudformationStackNotExisting => "CLOUDFORMATION_STACK_NOT_EXISTING",
QueryErrorCode::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"CLOUDFORMATION_STACK_INACTIVE",
"CLOUDFORMATION_STACK_NOT_EXISTING",
]
}
}
impl AsRef<str> for QueryErrorCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A structure that contains the ARN of a resource and its resource type.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceIdentifier {
/// <p>The ARN of a resource.</p>
pub resource_arn: std::option::Option<std::string::String>,
/// <p>The resource type of a resource, such as <code>AWS::EC2::Instance</code>.</p>
pub resource_type: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ResourceIdentifier {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResourceIdentifier");
formatter.field("resource_arn", &self.resource_arn);
formatter.field("resource_type", &self.resource_type);
formatter.finish()
}
}
/// See [`ResourceIdentifier`](crate::model::ResourceIdentifier)
pub mod resource_identifier {
/// A builder for [`ResourceIdentifier`](crate::model::ResourceIdentifier)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
pub(crate) resource_type: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN of a resource.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// <p>The resource type of a resource, such as <code>AWS::EC2::Instance</code>.</p>
pub fn resource_type(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_type = Some(input.into());
self
}
pub fn set_resource_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_type = input;
self
}
/// Consumes the builder and constructs a [`ResourceIdentifier`](crate::model::ResourceIdentifier)
pub fn build(self) -> crate::model::ResourceIdentifier {
crate::model::ResourceIdentifier {
resource_arn: self.resource_arn,
resource_type: self.resource_type,
}
}
}
}
impl ResourceIdentifier {
/// Creates a new builder-style object to manufacture [`ResourceIdentifier`](crate::model::ResourceIdentifier)
pub fn builder() -> crate::model::resource_identifier::Builder {
crate::model::resource_identifier::Builder::default()
}
}
/// <p>An item in a group configuration. A group service configuration can have one or more
/// items. For details about group service configuration syntax, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html">Service configurations for
/// resource groups</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupConfigurationItem {
/// <p>Specifies the type of group configuration item. Each item must have a unique value for
/// <code>type</code>. For the list of types that you can specify for a configuration
/// item, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported resource types and
/// parameters</a>.</p>
pub r#type: std::option::Option<std::string::String>,
/// <p>A collection of parameters for this group configuration item. For the list of
/// parameters that you can use with each configuration item type, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported
/// resource types and parameters</a>.</p>
pub parameters: std::option::Option<std::vec::Vec<crate::model::GroupConfigurationParameter>>,
}
impl std::fmt::Debug for GroupConfigurationItem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupConfigurationItem");
formatter.field("r#type", &self.r#type);
formatter.field("parameters", &self.parameters);
formatter.finish()
}
}
/// See [`GroupConfigurationItem`](crate::model::GroupConfigurationItem)
pub mod group_configuration_item {
/// A builder for [`GroupConfigurationItem`](crate::model::GroupConfigurationItem)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) r#type: std::option::Option<std::string::String>,
pub(crate) parameters:
std::option::Option<std::vec::Vec<crate::model::GroupConfigurationParameter>>,
}
impl Builder {
/// <p>Specifies the type of group configuration item. Each item must have a unique value for
/// <code>type</code>. For the list of types that you can specify for a configuration
/// item, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported resource types and
/// parameters</a>.</p>
pub fn r#type(mut self, input: impl Into<std::string::String>) -> Self {
self.r#type = Some(input.into());
self
}
pub fn set_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.r#type = input;
self
}
pub fn parameters(
mut self,
input: impl Into<crate::model::GroupConfigurationParameter>,
) -> Self {
let mut v = self.parameters.unwrap_or_default();
v.push(input.into());
self.parameters = Some(v);
self
}
pub fn set_parameters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GroupConfigurationParameter>>,
) -> Self {
self.parameters = input;
self
}
/// Consumes the builder and constructs a [`GroupConfigurationItem`](crate::model::GroupConfigurationItem)
pub fn build(self) -> crate::model::GroupConfigurationItem {
crate::model::GroupConfigurationItem {
r#type: self.r#type,
parameters: self.parameters,
}
}
}
}
impl GroupConfigurationItem {
/// Creates a new builder-style object to manufacture [`GroupConfigurationItem`](crate::model::GroupConfigurationItem)
pub fn builder() -> crate::model::group_configuration_item::Builder {
crate::model::group_configuration_item::Builder::default()
}
}
/// <p>A parameter for a group configuration item. For details about group service
/// configuration syntax, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html">Service configurations for resource
/// groups</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupConfigurationParameter {
/// <p>The name of the group configuration parameter. For the list of parameters that you can
/// use with each configuration item type, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported resource types and
/// parameters</a>.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The value or values to be used for the specified parameter. For the list of values you
/// can use with each parameter, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported resource types and
/// parameters</a>.</p>
pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for GroupConfigurationParameter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupConfigurationParameter");
formatter.field("name", &self.name);
formatter.field("values", &self.values);
formatter.finish()
}
}
/// See [`GroupConfigurationParameter`](crate::model::GroupConfigurationParameter)
pub mod group_configuration_parameter {
/// A builder for [`GroupConfigurationParameter`](crate::model::GroupConfigurationParameter)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the group configuration parameter. For the list of parameters that you can
/// use with each configuration item type, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html#about-slg-types">Supported resource types and
/// parameters</a>.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.values.unwrap_or_default();
v.push(input.into());
self.values = Some(v);
self
}
pub fn set_values(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.values = input;
self
}
/// Consumes the builder and constructs a [`GroupConfigurationParameter`](crate::model::GroupConfigurationParameter)
pub fn build(self) -> crate::model::GroupConfigurationParameter {
crate::model::GroupConfigurationParameter {
name: self.name,
values: self.values,
}
}
}
}
impl GroupConfigurationParameter {
/// Creates a new builder-style object to manufacture [`GroupConfigurationParameter`](crate::model::GroupConfigurationParameter)
pub fn builder() -> crate::model::group_configuration_parameter::Builder {
crate::model::group_configuration_parameter::Builder::default()
}
}
/// <p>The unique identifiers for a resource group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupIdentifier {
/// <p>The name of the resource group.</p>
pub group_name: std::option::Option<std::string::String>,
/// <p>The ARN of the resource group.</p>
pub group_arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GroupIdentifier {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupIdentifier");
formatter.field("group_name", &self.group_name);
formatter.field("group_arn", &self.group_arn);
formatter.finish()
}
}
/// See [`GroupIdentifier`](crate::model::GroupIdentifier)
pub mod group_identifier {
/// A builder for [`GroupIdentifier`](crate::model::GroupIdentifier)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) group_name: std::option::Option<std::string::String>,
pub(crate) group_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the resource group.</p>
pub fn group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.group_name = Some(input.into());
self
}
pub fn set_group_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.group_name = input;
self
}
/// <p>The ARN of the resource group.</p>
pub fn group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.group_arn = Some(input.into());
self
}
pub fn set_group_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.group_arn = input;
self
}
/// Consumes the builder and constructs a [`GroupIdentifier`](crate::model::GroupIdentifier)
pub fn build(self) -> crate::model::GroupIdentifier {
crate::model::GroupIdentifier {
group_name: self.group_name,
group_arn: self.group_arn,
}
}
}
}
impl GroupIdentifier {
/// Creates a new builder-style object to manufacture [`GroupIdentifier`](crate::model::GroupIdentifier)
pub fn builder() -> crate::model::group_identifier::Builder {
crate::model::group_identifier::Builder::default()
}
}
/// <p>A filter collection that you can use to restrict the results from a <code>List</code>
/// operation to only those you want to include.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupFilter {
/// <p>The name of the filter. Filter names are case-sensitive.</p>
pub name: std::option::Option<crate::model::GroupFilterName>,
/// <p>One or more filter values. Allowed filter values vary by group filter name, and are
/// case-sensitive.</p>
pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupFilter");
formatter.field("name", &self.name);
formatter.field("values", &self.values);
formatter.finish()
}
}
/// See [`GroupFilter`](crate::model::GroupFilter)
pub mod group_filter {
/// A builder for [`GroupFilter`](crate::model::GroupFilter)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<crate::model::GroupFilterName>,
pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the filter. Filter names are case-sensitive.</p>
pub fn name(mut self, input: crate::model::GroupFilterName) -> Self {
self.name = Some(input);
self
}
pub fn set_name(
mut self,
input: std::option::Option<crate::model::GroupFilterName>,
) -> Self {
self.name = input;
self
}
pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.values.unwrap_or_default();
v.push(input.into());
self.values = Some(v);
self
}
pub fn set_values(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.values = input;
self
}
/// Consumes the builder and constructs a [`GroupFilter`](crate::model::GroupFilter)
pub fn build(self) -> crate::model::GroupFilter {
crate::model::GroupFilter {
name: self.name,
values: self.values,
}
}
}
}
impl GroupFilter {
/// Creates a new builder-style object to manufacture [`GroupFilter`](crate::model::GroupFilter)
pub fn builder() -> crate::model::group_filter::Builder {
crate::model::group_filter::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GroupFilterName {
ConfigurationType,
ResourceType,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GroupFilterName {
fn from(s: &str) -> Self {
match s {
"configuration-type" => GroupFilterName::ConfigurationType,
"resource-type" => GroupFilterName::ResourceType,
other => GroupFilterName::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GroupFilterName {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GroupFilterName::from(s))
}
}
impl GroupFilterName {
pub fn as_str(&self) -> &str {
match self {
GroupFilterName::ConfigurationType => "configuration-type",
GroupFilterName::ResourceType => "resource-type",
GroupFilterName::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["configuration-type", "resource-type"]
}
}
impl AsRef<str> for GroupFilterName {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A structure returned by the <a>ListGroupResources</a> operation that
/// contains identity and group membership status information for one of the resources in
/// the group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListGroupResourcesItem {
/// <p>A structure that contains the ARN of a resource and its resource type.</p>
pub identifier: std::option::Option<crate::model::ResourceIdentifier>,
/// <p>A structure that contains the status of this resource's membership in the
/// group.</p>
/// <note>
/// <p>This field is present in the response only if the group is of type
/// <code>AWS::EC2::HostManagement</code>.</p>
/// </note>
pub status: std::option::Option<crate::model::ResourceStatus>,
}
impl std::fmt::Debug for ListGroupResourcesItem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListGroupResourcesItem");
formatter.field("identifier", &self.identifier);
formatter.field("status", &self.status);
formatter.finish()
}
}
/// See [`ListGroupResourcesItem`](crate::model::ListGroupResourcesItem)
pub mod list_group_resources_item {
/// A builder for [`ListGroupResourcesItem`](crate::model::ListGroupResourcesItem)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) identifier: std::option::Option<crate::model::ResourceIdentifier>,
pub(crate) status: std::option::Option<crate::model::ResourceStatus>,
}
impl Builder {
/// <p>A structure that contains the ARN of a resource and its resource type.</p>
pub fn identifier(mut self, input: crate::model::ResourceIdentifier) -> Self {
self.identifier = Some(input);
self
}
pub fn set_identifier(
mut self,
input: std::option::Option<crate::model::ResourceIdentifier>,
) -> Self {
self.identifier = input;
self
}
/// <p>A structure that contains the status of this resource's membership in the
/// group.</p>
/// <note>
/// <p>This field is present in the response only if the group is of type
/// <code>AWS::EC2::HostManagement</code>.</p>
/// </note>
pub fn status(mut self, input: crate::model::ResourceStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::ResourceStatus>,
) -> Self {
self.status = input;
self
}
/// Consumes the builder and constructs a [`ListGroupResourcesItem`](crate::model::ListGroupResourcesItem)
pub fn build(self) -> crate::model::ListGroupResourcesItem {
crate::model::ListGroupResourcesItem {
identifier: self.identifier,
status: self.status,
}
}
}
}
impl ListGroupResourcesItem {
/// Creates a new builder-style object to manufacture [`ListGroupResourcesItem`](crate::model::ListGroupResourcesItem)
pub fn builder() -> crate::model::list_group_resources_item::Builder {
crate::model::list_group_resources_item::Builder::default()
}
}
/// <p>A structure that identifies the current group membership status for a resource. Adding
/// a resource to a resource group is performed asynchronously as a background task. A
/// <code>PENDING</code> status indicates, for this resource, that the process isn't
/// completed yet.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceStatus {
/// <p>The current status.</p>
pub name: std::option::Option<crate::model::ResourceStatusValue>,
}
impl std::fmt::Debug for ResourceStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResourceStatus");
formatter.field("name", &self.name);
formatter.finish()
}
}
/// See [`ResourceStatus`](crate::model::ResourceStatus)
pub mod resource_status {
/// A builder for [`ResourceStatus`](crate::model::ResourceStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<crate::model::ResourceStatusValue>,
}
impl Builder {
/// <p>The current status.</p>
pub fn name(mut self, input: crate::model::ResourceStatusValue) -> Self {
self.name = Some(input);
self
}
pub fn set_name(
mut self,
input: std::option::Option<crate::model::ResourceStatusValue>,
) -> Self {
self.name = input;
self
}
/// Consumes the builder and constructs a [`ResourceStatus`](crate::model::ResourceStatus)
pub fn build(self) -> crate::model::ResourceStatus {
crate::model::ResourceStatus { name: self.name }
}
}
}
impl ResourceStatus {
/// Creates a new builder-style object to manufacture [`ResourceStatus`](crate::model::ResourceStatus)
pub fn builder() -> crate::model::resource_status::Builder {
crate::model::resource_status::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ResourceStatusValue {
Pending,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ResourceStatusValue {
fn from(s: &str) -> Self {
match s {
"PENDING" => ResourceStatusValue::Pending,
other => ResourceStatusValue::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ResourceStatusValue {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ResourceStatusValue::from(s))
}
}
impl ResourceStatusValue {
pub fn as_str(&self) -> &str {
match self {
ResourceStatusValue::Pending => "PENDING",
ResourceStatusValue::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["PENDING"]
}
}
impl AsRef<str> for ResourceStatusValue {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A filter name and value pair that is used to obtain more specific results from a list
/// of resources.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceFilter {
/// <p>The name of the filter. Filter names are case-sensitive.</p>
pub name: std::option::Option<crate::model::ResourceFilterName>,
/// <p>One or more filter values. Allowed filter values vary by resource filter name, and are
/// case-sensitive.</p>
pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for ResourceFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResourceFilter");
formatter.field("name", &self.name);
formatter.field("values", &self.values);
formatter.finish()
}
}
/// See [`ResourceFilter`](crate::model::ResourceFilter)
pub mod resource_filter {
/// A builder for [`ResourceFilter`](crate::model::ResourceFilter)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<crate::model::ResourceFilterName>,
pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the filter. Filter names are case-sensitive.</p>
pub fn name(mut self, input: crate::model::ResourceFilterName) -> Self {
self.name = Some(input);
self
}
pub fn set_name(
mut self,
input: std::option::Option<crate::model::ResourceFilterName>,
) -> Self {
self.name = input;
self
}
pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.values.unwrap_or_default();
v.push(input.into());
self.values = Some(v);
self
}
pub fn set_values(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.values = input;
self
}
/// Consumes the builder and constructs a [`ResourceFilter`](crate::model::ResourceFilter)
pub fn build(self) -> crate::model::ResourceFilter {
crate::model::ResourceFilter {
name: self.name,
values: self.values,
}
}
}
}
impl ResourceFilter {
/// Creates a new builder-style object to manufacture [`ResourceFilter`](crate::model::ResourceFilter)
pub fn builder() -> crate::model::resource_filter::Builder {
crate::model::resource_filter::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ResourceFilterName {
ResourceType,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ResourceFilterName {
fn from(s: &str) -> Self {
match s {
"resource-type" => ResourceFilterName::ResourceType,
other => ResourceFilterName::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ResourceFilterName {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ResourceFilterName::from(s))
}
}
impl ResourceFilterName {
pub fn as_str(&self) -> &str {
match self {
ResourceFilterName::ResourceType => "resource-type",
ResourceFilterName::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["resource-type"]
}
}
impl AsRef<str> for ResourceFilterName {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A service configuration associated with a resource group. The configuration options
/// are determined by the AWS service that defines the <code>Type</code>, and specifies
/// which resources can be included in the group. You can add a service configuration when
/// you create the group by using <a>CreateGroup</a>, or later by using the <a>PutGroupConfiguration</a> operation. For details about group service
/// configuration syntax, see <a href="https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html">Service configurations for resource
/// groups</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GroupConfiguration {
/// <p>The configuration currently associated with the group and in effect.</p>
pub configuration: std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
/// <p>If present, the new configuration that is in the process of being applied to the
/// group.</p>
pub proposed_configuration:
std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
/// <p>The current status of an attempt to update the group configuration.</p>
pub status: std::option::Option<crate::model::GroupConfigurationStatus>,
/// <p>If present, the reason why a request to update the group configuration failed.</p>
pub failure_reason: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GroupConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GroupConfiguration");
formatter.field("configuration", &self.configuration);
formatter.field("proposed_configuration", &self.proposed_configuration);
formatter.field("status", &self.status);
formatter.field("failure_reason", &self.failure_reason);
formatter.finish()
}
}
/// See [`GroupConfiguration`](crate::model::GroupConfiguration)
pub mod group_configuration {
/// A builder for [`GroupConfiguration`](crate::model::GroupConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) configuration:
std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
pub(crate) proposed_configuration:
std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
pub(crate) status: std::option::Option<crate::model::GroupConfigurationStatus>,
pub(crate) failure_reason: std::option::Option<std::string::String>,
}
impl Builder {
pub fn configuration(
mut self,
input: impl Into<crate::model::GroupConfigurationItem>,
) -> Self {
let mut v = self.configuration.unwrap_or_default();
v.push(input.into());
self.configuration = Some(v);
self
}
pub fn set_configuration(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
) -> Self {
self.configuration = input;
self
}
pub fn proposed_configuration(
mut self,
input: impl Into<crate::model::GroupConfigurationItem>,
) -> Self {
let mut v = self.proposed_configuration.unwrap_or_default();
v.push(input.into());
self.proposed_configuration = Some(v);
self
}
pub fn set_proposed_configuration(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GroupConfigurationItem>>,
) -> Self {
self.proposed_configuration = input;
self
}
/// <p>The current status of an attempt to update the group configuration.</p>
pub fn status(mut self, input: crate::model::GroupConfigurationStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::GroupConfigurationStatus>,
) -> Self {
self.status = input;
self
}
/// <p>If present, the reason why a request to update the group configuration failed.</p>
pub fn failure_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.failure_reason = Some(input.into());
self
}
pub fn set_failure_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.failure_reason = input;
self
}
/// Consumes the builder and constructs a [`GroupConfiguration`](crate::model::GroupConfiguration)
pub fn build(self) -> crate::model::GroupConfiguration {
crate::model::GroupConfiguration {
configuration: self.configuration,
proposed_configuration: self.proposed_configuration,
status: self.status,
failure_reason: self.failure_reason,
}
}
}
}
impl GroupConfiguration {
/// Creates a new builder-style object to manufacture [`GroupConfiguration`](crate::model::GroupConfiguration)
pub fn builder() -> crate::model::group_configuration::Builder {
crate::model::group_configuration::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GroupConfigurationStatus {
UpdateComplete,
UpdateFailed,
Updating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GroupConfigurationStatus {
fn from(s: &str) -> Self {
match s {
"UPDATE_COMPLETE" => GroupConfigurationStatus::UpdateComplete,
"UPDATE_FAILED" => GroupConfigurationStatus::UpdateFailed,
"UPDATING" => GroupConfigurationStatus::Updating,
other => GroupConfigurationStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GroupConfigurationStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GroupConfigurationStatus::from(s))
}
}
impl GroupConfigurationStatus {
pub fn as_str(&self) -> &str {
match self {
GroupConfigurationStatus::UpdateComplete => "UPDATE_COMPLETE",
GroupConfigurationStatus::UpdateFailed => "UPDATE_FAILED",
GroupConfigurationStatus::Updating => "UPDATING",
GroupConfigurationStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["UPDATE_COMPLETE", "UPDATE_FAILED", "UPDATING"]
}
}
impl AsRef<str> for GroupConfigurationStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
| 40.245237 | 200 | 0.615694 |
67b8e0b01714ba103c3173727277131c42f8115c | 8,756 | //! Parallel iterator types for [ranges][std::range],
//! the type for values created by `a..b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..100u64).into_par_iter()
//! .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..100).sum::<u64>(), r);
//! ```
//!
//! [std::range]: https://doc.rust-lang.org/core/ops/struct.Range.html
use crate::iter::plumbing::*;
use crate::iter::*;
use std::ops::Range;
use std::usize;
/// Parallel iterator over a range, implemented for all integer types.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is not implemented for `u64`, `i64`, `u128`, or `i128`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..25usize).into_par_iter()
/// .zip(0..25usize)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum::<usize>();
///
/// let s = (0..25usize).zip(0..25)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
range: Range<T>,
}
impl<T> IntoParallelIterator for Range<T>
where
Iter<T>: ParallelIterator,
{
type Item = <Iter<T> as ParallelIterator>::Item;
type Iter = Iter<T>;
fn into_par_iter(self) -> Self::Iter {
Iter { range: self }
}
}
struct IterProducer<T> {
range: Range<T>,
}
impl<T> IntoIterator for IterProducer<T>
where
Range<T>: Iterator,
{
type Item = <Range<T> as Iterator>::Item;
type IntoIter = Range<T>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
}
macro_rules! indexed_range_impl {
( $t:ty ) => {
impl ParallelIterator for Iter<$t> {
type Item = $t;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl IndexedParallelIterator for Iter<$t> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.range.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(IterProducer { range: self.range })
}
}
impl Producer for IterProducer<$t> {
type Item = <Range<$t> as Iterator>::Item;
type IntoIter = Range<$t>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
fn split_at(self, index: usize) -> (Self, Self) {
assert!(index <= self.range.len());
// For signed $t, the length and requested index could be greater than $t::MAX, and
// then `index as $t` could wrap to negative, so wrapping_add is necessary.
let mid = self.range.start.wrapping_add(index as $t);
let left = self.range.start..mid;
let right = mid..self.range.end;
(IterProducer { range: left }, IterProducer { range: right })
}
}
};
}
trait UnindexedRangeLen<L> {
fn len(&self) -> L;
}
macro_rules! unindexed_range_impl {
( $t:ty, $len_t:ty ) => {
impl UnindexedRangeLen<$len_t> for Range<$t> {
fn len(&self) -> $len_t {
let &Range { start, end } = self;
if end > start {
end.wrapping_sub(start) as $len_t
} else {
0
}
}
}
impl ParallelIterator for Iter<$t> {
type Item = $t;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
#[inline]
fn offset(start: $t) -> impl Fn(usize) -> $t {
move |i| start.wrapping_add(i as $t)
}
if let Some(len) = self.opt_len() {
// Drive this in indexed mode for better `collect`.
(0..len)
.into_par_iter()
.map(offset(self.range.start))
.drive(consumer)
} else {
bridge_unindexed(IterProducer { range: self.range }, consumer)
}
}
fn opt_len(&self) -> Option<usize> {
let len = self.range.len();
if len <= usize::MAX as $len_t {
Some(len as usize)
} else {
None
}
}
}
impl UnindexedProducer for IterProducer<$t> {
type Item = $t;
fn split(mut self) -> (Self, Option<Self>) {
let index = self.range.len() / 2;
if index > 0 {
let mid = self.range.start.wrapping_add(index as $t);
let right = mid..self.range.end;
self.range.end = mid;
(self, Some(IterProducer { range: right }))
} else {
(self, None)
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self)
}
}
};
}
// all Range<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {u32}
indexed_range_impl! {usize}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
indexed_range_impl! {i32}
indexed_range_impl! {isize}
// other Range<T> with just Iterator
unindexed_range_impl! {u64, u64}
unindexed_range_impl! {i64, u64}
unindexed_range_impl! {u128, u128}
unindexed_range_impl! {i128, u128}
#[test]
fn check_range_split_at_overflow() {
// Note, this split index overflows i8!
let producer = IterProducer { range: -100i8..100 };
let (left, right) = producer.split_at(150);
let r1: i32 = left.range.map(i32::from).sum();
let r2: i32 = right.range.map(i32::from).sum();
assert_eq!(r1 + r2, -100);
}
#[test]
fn test_i128_len_doesnt_overflow() {
use std::{i128, u128};
// Using parse because some versions of rust don't allow long literals
let octillion: i128 = "1000000000000000000000000000".parse().unwrap();
let producer = IterProducer {
range: 0..octillion,
};
assert_eq!(octillion as u128, producer.range.len());
assert_eq!(octillion as u128, (0..octillion).len());
assert_eq!(2 * octillion as u128, (-octillion..octillion).len());
assert_eq!(u128::MAX, (i128::MIN..i128::MAX).len());
}
#[test]
fn test_u64_opt_len() {
use std::{u64, usize};
assert_eq!(Some(100), (0..100u64).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u64).into_par_iter().opt_len()
);
if (usize::MAX as u64) < u64::MAX {
assert_eq!(
None,
(0..(usize::MAX as u64).wrapping_add(1))
.into_par_iter()
.opt_len()
);
assert_eq!(None, (0..u64::MAX).into_par_iter().opt_len());
}
}
#[test]
fn test_u128_opt_len() {
use std::{u128, usize};
assert_eq!(Some(100), (0..100u128).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u128).into_par_iter().opt_len()
);
assert_eq!(None, (0..1 + usize::MAX as u128).into_par_iter().opt_len());
assert_eq!(None, (0..u128::MAX).into_par_iter().opt_len());
}
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
use std::i64;
use crate::ThreadPoolBuilder;
let iter = (-2..i64::MAX).into_par_iter();
assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 2));
// always run with multiple threads to split into, or this will take forever...
let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX - 1)));
}
| 29.284281 | 99 | 0.515532 |
232dfda793151cd6d1550bbef06be3edac9264d4 | 36,737 | use crate::{
conversion::{CoordSeq, CoordType, ToSFCGALGeom},
errors::get_last_error,
utils::{_c_string_with_size, _string, check_computed_value, check_predicate},
{Result, ToSFCGAL},
};
use num_traits::FromPrimitive;
use sfcgal_sys::{
initialize, sfcgal_geometry_approximate_medial_axis, sfcgal_geometry_area,
sfcgal_geometry_area_3d, sfcgal_geometry_as_text, sfcgal_geometry_as_text_decim,
sfcgal_geometry_clone, sfcgal_geometry_collection_add_geometry,
sfcgal_geometry_collection_create, sfcgal_geometry_collection_geometry_n,
sfcgal_geometry_collection_num_geometries, sfcgal_geometry_convexhull,
sfcgal_geometry_convexhull_3d, sfcgal_geometry_covers, sfcgal_geometry_covers_3d,
sfcgal_geometry_delete, sfcgal_geometry_difference, sfcgal_geometry_difference_3d,
sfcgal_geometry_distance, sfcgal_geometry_distance_3d, sfcgal_geometry_extrude,
sfcgal_geometry_intersection, sfcgal_geometry_intersection_3d, sfcgal_geometry_intersects,
sfcgal_geometry_intersects_3d, sfcgal_geometry_is_3d, sfcgal_geometry_is_empty,
sfcgal_geometry_is_measured, sfcgal_geometry_is_planar, sfcgal_geometry_is_valid,
sfcgal_geometry_is_valid_detail, sfcgal_geometry_line_sub_string,
sfcgal_geometry_minkowski_sum, sfcgal_geometry_offset_polygon, sfcgal_geometry_orientation,
sfcgal_geometry_straight_skeleton, sfcgal_geometry_straight_skeleton_distance_in_m,
sfcgal_geometry_t, sfcgal_geometry_tesselate, sfcgal_geometry_triangulate_2dz,
sfcgal_geometry_type_id, sfcgal_geometry_union, sfcgal_geometry_union_3d,
sfcgal_geometry_volume, sfcgal_io_read_wkt, sfcgal_multi_linestring_create,
sfcgal_multi_point_create, sfcgal_multi_polygon_create, size_t,
};
use std::{ffi::CString, mem::MaybeUninit, os::raw::c_char, ptr::NonNull};
/// SFCGAL Geometry types.
///
/// Indicates the type of shape represented by a `SFCGeometry`.
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga1afcf1fad6c2daeca001481b125b84c6))
#[repr(C)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Primitive)]
pub enum GeomType {
Point = 1,
Linestring = 2,
Polygon = 3,
Multipoint = 4,
Multilinestring = 5,
Multipolygon = 6,
Geometrycollection = 7,
Polyhedralsurface = 15,
Triangulatedsurface = 16,
Triangle = 100,
Solid = 101,
Multisolid = 102,
}
impl GeomType {
fn is_collection_type(&self) -> bool {
match &self {
GeomType::Multipoint
| GeomType::Multilinestring
| GeomType::Multipolygon
| GeomType::Multisolid
| GeomType::Geometrycollection => true,
_ => false,
}
}
}
/// Represents the orientation of a `SFCGeometry`.
#[derive(PartialEq, Eq, Debug, Primitive)]
pub enum Orientation {
CounterClockWise = -1isize,
ClockWise = 1isize,
Undetermined = 0isize,
}
/// Object representing a SFCGAL Geometry.
///
/// Most of the operations allowed by SFCGAL C API are wrapped,
/// except those modifying the geometry in-place (such as adding a new
/// point to a linestring for example) and those retrieving a specific part
/// of a geometry (such as getting the 2nd interior ring of some polygon as a linestring).
/// However, this can easily be done by yourself by converting them from/to coordinates
/// with the `new_from_coordinates` and `to_coordinates` methods.
///
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#gadd6d3ea5a71a957581248791624fad58))
#[repr(C)]
pub struct SFCGeometry {
pub(crate) c_geom: NonNull<sfcgal_geometry_t>,
pub(crate) owned: bool,
}
impl Drop for SFCGeometry {
fn drop(&mut self) {
if self.owned {
unsafe { sfcgal_geometry_delete(self.c_geom.as_mut()) }
}
}
}
impl Clone for SFCGeometry {
fn clone(&self) -> SFCGeometry {
SFCGeometry {
c_geom: NonNull::new(unsafe { sfcgal_geometry_clone(self.c_geom.as_ref()) }).unwrap(),
owned: true,
}
}
}
impl std::fmt::Debug for SFCGeometry {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.to_wkt_decim(8).unwrap())
}
}
impl SFCGeometry {
/// Create a geometry by parsing a [WKT](https://en.wikipedia.org/wiki/Well-known_text) string.
pub fn new(wkt: &str) -> Result<SFCGeometry> {
initialize();
let c_str = CString::new(wkt)?;
let obj = unsafe { sfcgal_io_read_wkt(c_str.as_ptr(), wkt.len() as size_t) };
unsafe { SFCGeometry::new_from_raw(obj, true) }
}
pub(crate) unsafe fn new_from_raw(
g: *mut sfcgal_geometry_t,
owned: bool,
) -> Result<SFCGeometry> {
Ok(SFCGeometry {
owned,
c_geom: NonNull::new(g).ok_or_else(|| {
format_err!(
"Obtained null pointer when creating geometry: {}",
get_last_error()
)
})?,
})
}
pub fn new_from_coordinates<T>(coords: &CoordSeq<T>) -> Result<SFCGeometry>
where
T: ToSFCGALGeom + CoordType,
{
coords.to_sfcgal()
}
/// Returns a WKT representation of the given `SFCGeometry` using CGAL exact integer fractions as coordinate values.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga3bc1954e3c034b60f0faff5e8227c398))
pub fn to_wkt(&self) -> Result<String> {
let mut ptr = MaybeUninit::<*mut c_char>::uninit();
let mut length: size_t = 0;
unsafe {
sfcgal_geometry_as_text(self.c_geom.as_ref(), ptr.as_mut_ptr(), &mut length);
Ok(_c_string_with_size(ptr.assume_init(), length))
}
}
/// Returns a WKT representation of the given `SFCGeometry` using floating point coordinate values with
/// the desired number of decimals.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#gaaf23f2c95fd48810beb37d07a9652253))
pub fn to_wkt_decim(&self, nb_decim: i32) -> Result<String> {
let mut ptr = MaybeUninit::<*mut c_char>::uninit();
let mut length: size_t = 0;
unsafe {
sfcgal_geometry_as_text_decim(
self.c_geom.as_ref(),
nb_decim,
ptr.as_mut_ptr(),
&mut length,
);
Ok(_c_string_with_size(ptr.assume_init(), length))
}
}
/// Test if the given `SFCGeometry` is empty or not.
pub fn is_empty(&self) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_is_empty(self.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Test if the given `SFCGeometry` is valid or not.
pub fn is_valid(&self) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_is_valid(self.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Test if the given `SFCGeometry` is measured (has an 'm' coordinates)
pub fn is_measured(&self) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_is_measured(self.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Returns reason for the invalidity or None in case of validity.
pub fn validity_detail(&self) -> Result<Option<String>> {
let mut ptr = MaybeUninit::<*mut c_char>::uninit();
unsafe {
let rv = sfcgal_geometry_is_valid_detail(
self.c_geom.as_ptr(),
ptr.as_mut_ptr(),
std::ptr::null::<sfcgal_geometry_t>() as *mut *mut sfcgal_geometry_t,
);
match rv {
1 => Ok(None),
0 => Ok(Some(_string(ptr.assume_init()))),
_ => Err(format_err!("SFCGAL error: {}", get_last_error())),
}
}
}
/// Test if the given `SFCGeometry` is planar or not.
pub fn is_planar(&self) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_is_planar(self.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Test if the given `SFCGeometry` is a 3d geometry or not.
pub fn is_3d(&self) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_is_3d(self.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Returns the SFCGAL type of the given `SFCGeometry`.
pub fn _type(&self) -> Result<GeomType> {
let type_geom = unsafe { sfcgal_geometry_type_id(self.c_geom.as_ptr()) };
GeomType::from_u32(type_geom)
.ok_or_else(|| format_err!("Unknown geometry type (val={})", type_geom))
}
/// Computes the distance to an other `SFCGeometry`.
pub fn distance(&self, other: &SFCGeometry) -> Result<f64> {
let distance =
unsafe { sfcgal_geometry_distance(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_computed_value(distance)
}
/// Computes the 3d distance to an other `SFCGeometry`.
pub fn distance_3d(&self, other: &SFCGeometry) -> Result<f64> {
let distance =
unsafe { sfcgal_geometry_distance_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_computed_value(distance)
}
/// Computes the area of the given `SFCGeometry`.
pub fn area(&self) -> Result<f64> {
let area = unsafe { sfcgal_geometry_area(self.c_geom.as_ptr()) };
check_computed_value(area)
}
/// Computes the 3d area of the given `SFCGeometry`.
pub fn area_3d(&self) -> Result<f64> {
let area = unsafe { sfcgal_geometry_area_3d(self.c_geom.as_ptr()) };
check_computed_value(area)
}
/// Computes the volume of the given `SFCGeometry` (must be a volume).
pub fn volume(&self) -> Result<f64> {
let volume = unsafe { sfcgal_geometry_volume(self.c_geom.as_ptr()) };
check_computed_value(volume)
}
/// Computes the orientation of the given `SFCGeometry` (must be a Polygon)
pub fn orientation(&self) -> Result<Orientation> {
let orientation = unsafe { sfcgal_geometry_orientation(self.c_geom.as_ptr()) };
Orientation::from_i32(orientation)
.ok_or_else(|| format_err!("Error while retrieving orientation (val={})", orientation))
}
/// Test the intersection with an other `SFCGeometry`.
pub fn intersects(&self, other: &SFCGeometry) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_intersects(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Test the 3d intersection with an other `SFCGeometry`.
pub fn intersects_3d(&self, other: &SFCGeometry) -> Result<bool> {
let rv =
unsafe { sfcgal_geometry_intersects_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Returns the intersection of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn intersection(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_intersection(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the 3d intersection of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn intersection_3d(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_intersection_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
///
pub fn covers(&self, other: &SFCGeometry) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_covers(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_predicate(rv)
}
///
pub fn covers_3d(&self, other: &SFCGeometry) -> Result<bool> {
let rv = unsafe { sfcgal_geometry_covers_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
check_predicate(rv)
}
/// Returns the difference of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn difference(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_difference(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the 3d difference of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn difference_3d(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_difference_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the union of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn union(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_union(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the 3d union of the given `SFCGeometry` to an other `SFCGeometry`.
pub fn union_3d(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_union_3d(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the minkowski sum of the given `SFCGeometry` and an other `SFCGEOMETRY`.
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga02d35888dac40eee2eb2a2b133979c8d))
pub fn minkowski_sum(&self, other: &SFCGeometry) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_minkowski_sum(self.c_geom.as_ptr(), other.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the straight skeleton of the given `SFCGeometry`.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#gaefaa76b61d66e2ad11d902e6b5a13635))
pub fn straight_skeleton(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_straight_skeleton(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the straight skeleton of the given `SFCGeometry` with the distance to the border as M coordinate.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga972ea9e378eb2dc99c00b6ad57d05e88))
pub fn straight_skeleton_distance_in_m(&self) -> Result<SFCGeometry> {
let result =
unsafe { sfcgal_geometry_straight_skeleton_distance_in_m(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the approximate medial axis for the given `SFCGeometry` Polygon.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga16a9b4b1211843f8444284b1fefebc46))
pub fn approximate_medial_axis(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_approximate_medial_axis(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the offset polygon of the given `SFCGeometry`.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga9766f54ebede43a9b71fccf1524a1054))
pub fn offset_polygon(&self, radius: f64) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_offset_polygon(self.c_geom.as_ptr(), radius) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the extrusion of the given `SFCGeometry` (not supported on Solid and Multisolid).
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga277d01bd9978e13644baa1755f1cd3e0)
pub fn extrude(&self, ex: f64, ey: f64, ez: f64) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_extrude(self.c_geom.as_ptr(), ex, ey, ez) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns a tesselation of the given `SFCGeometry`.
/// ([C API reference](http://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga570ce6214f305ed35ebbec62d366b588))
pub fn tesselate(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_tesselate(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns a triangulation of the given `SFCGeometry`.
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#gae382792f387654a9adb2e2c38735e08d))
pub fn triangulate_2dz(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_triangulate_2dz(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the convex hull of the given `SFCGeometry`.
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#ga9027b5654cbacf6c2106d70b129d3a23))
pub fn convexhull(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_convexhull(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the 3d convex hull of the given `SFCGeometry`.
/// ([C API reference](https://oslandia.github.io/SFCGAL/doxygen/group__capi.html#gacf01a9097f2059afaad871658b4b5a6f))
pub fn convexhull_3d(&self) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_convexhull_3d(self.c_geom.as_ptr()) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Returns the substring of the given `SFCGeometry` LineString between fractional distances.
/// ([C API reference](https://oslandia.gitlab.io/SFCGAL/doxygen/group__capi.html#ga9184685ade86d02191ffaf0337ed3c1d))
pub fn line_substring(&self, start: f64, end: f64) -> Result<SFCGeometry> {
let result = unsafe { sfcgal_geometry_line_sub_string(self.c_geom.as_ptr(), start, end) };
unsafe { SFCGeometry::new_from_raw(result, true) }
}
/// Create a SFCGeometry collection type (MultiPoint, MultiLineString, MultiPolygon, MultiSolid
/// or GeometryCollection) given a mutable slice of `SFCGeometry`'s
/// (this is a destructive operation)
/// ``` rust
/// use sfcgal::SFCGeometry;
/// let a = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
/// let b = SFCGeometry::new("POINT(2.0 2.0)").unwrap();
/// let g = SFCGeometry::create_collection(&mut[a, b]).unwrap();
/// assert_eq!(
/// g.to_wkt_decim(1).unwrap(),
/// "MULTIPOINT((1.0 1.0),(2.0 2.0))",
/// );
/// ```
pub fn create_collection(geoms: &mut [SFCGeometry]) -> Result<SFCGeometry> {
if geoms.is_empty() {
let res_geom = unsafe { sfcgal_geometry_collection_create() };
return unsafe { SFCGeometry::new_from_raw(res_geom, true) };
}
let types = geoms
.iter()
.map(|g| g._type().unwrap())
.collect::<Vec<GeomType>>();
let multis = types
.iter()
.map(|gt| gt.is_collection_type())
.collect::<Vec<bool>>();
if !is_all_same(&types) || multis.iter().any(|&x| x == true) {
let res_geom = unsafe { sfcgal_geometry_collection_create() };
make_multi_geom(res_geom, geoms)
} else if types[0] == GeomType::Point {
let res_geom = unsafe { sfcgal_multi_point_create() };
make_multi_geom(res_geom, geoms)
} else if types[0] == GeomType::Linestring {
let res_geom = unsafe { sfcgal_multi_linestring_create() };
make_multi_geom(res_geom, geoms)
} else if types[0] == GeomType::Polygon {
let res_geom = unsafe { sfcgal_multi_polygon_create() };
make_multi_geom(res_geom, geoms)
} else if types[0] == GeomType::Solid {
let mut res_geom = SFCGeometry::new("MULTISOLID EMPTY")?;
res_geom.owned = false;
make_multi_geom(res_geom.c_geom.as_ptr(), geoms)
} else {
unreachable!();
}
}
/// Get the members of a SFCGeometry.
/// Returns Err if the SFCGeometry if not a collection (i.e. if it's type
/// is not in { MultiPoint, MultiLineString, MultiPolygon, MultiSolid, GeometryCollection }).
/// The original geometry stay untouched.
/// ``` rust
/// use sfcgal::SFCGeometry;
/// let g = SFCGeometry::new("MULTIPOINT((1.0 1.0),(2.0 2.0))").unwrap();
/// let members = g.get_collection_members().unwrap();
/// assert_eq!(
/// members[0].to_wkt_decim(1).unwrap(),
/// "POINT(1.0 1.0)",
/// );
/// assert_eq!(
/// members[1].to_wkt_decim(1).unwrap(),
/// "POINT(2.0 2.0)",
/// );
/// ```
pub fn get_collection_members(self) -> Result<Vec<SFCGeometry>> {
let _type = self._type()?;
if !_type.is_collection_type() {
return Err(format_err!(
"Error: the given geometry doesn't have any member ({:?} is not a collection type)",
_type,
));
}
unsafe {
let ptr = self.c_geom.as_ptr();
let n_geom = sfcgal_geometry_collection_num_geometries(ptr);
let mut result = Vec::new();
for n in 0..n_geom {
let _original_c_geom = sfcgal_geometry_collection_geometry_n(ptr, n);
let clone_c_geom = sfcgal_geometry_clone(_original_c_geom);
result.push(SFCGeometry::new_from_raw(clone_c_geom, true)?);
}
Ok(result)
}
}
}
fn is_all_same<T>(arr: &[T]) -> bool
where
T: Ord + Eq,
{
arr.iter().min() == arr.iter().max()
}
fn make_multi_geom(
out_multi: *mut sfcgal_geometry_t,
geoms: &mut [SFCGeometry],
) -> Result<SFCGeometry> {
for sfcgal_geom in geoms.into_iter() {
unsafe {
sfcgal_geom.owned = false;
sfcgal_geometry_collection_add_geometry(
out_multi,
sfcgal_geom.c_geom.as_ptr() as *mut sfcgal_geometry_t,
)
};
}
unsafe { SFCGeometry::new_from_raw(out_multi, true) }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn creation_point_from_wkt() {
let geom = SFCGeometry::new("POINT(1.0 1.0)");
assert!(geom.is_ok());
}
#[test]
fn creation_polygon_from_wkt() {
let geom = SFCGeometry::new("POLYGON((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 0.0))");
assert!(geom.is_ok());
let geom = geom.unwrap();
assert!(geom.is_valid().unwrap(), true);
let geom1 = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
assert_eq!(geom.intersects(&geom1).unwrap(), true);
}
#[test]
fn writing_to_wkt() {
let geom = SFCGeometry::new("POINT(1.0 1.0)");
assert!(geom.is_ok());
let wkt = geom.unwrap().to_wkt();
assert!(wkt.is_ok());
assert_eq!(wkt.unwrap(), String::from("POINT(1/1 1/1)"));
}
#[test]
fn writing_to_wkt_with_decimals() {
let geom = SFCGeometry::new("POINT(1.0 1.0)");
assert!(geom.is_ok());
let wkt = geom.unwrap().to_wkt_decim(1);
assert!(wkt.is_ok());
assert_eq!(wkt.unwrap(), String::from("POINT(1.0 1.0)"));
}
#[test]
fn creation_failed_with_error_message() {
let geom = SFCGeometry::new("POINT(1, 1)");
assert!(geom.is_err());
assert_eq!(
geom.err().unwrap().to_string(),
"Obtained null pointer when creating geometry: WKT parse error, Coordinate dimension < 2 (, 1))",
)
}
#[test]
fn distance_to_other() {
let pt1 = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
let pt2 = SFCGeometry::new("POINT(10.0 1.0)").unwrap();
let distance = pt1.distance(&pt2).unwrap();
assert_eq!(distance, 9.0);
}
#[test]
fn distance_3d_to_other() {
let pt1 = SFCGeometry::new("POINT(1.0 1.0 2.0)").unwrap();
let pt2 = SFCGeometry::new("POINT(10.0 1.0 2.0)").unwrap();
let distance = pt1.distance_3d(&pt2).unwrap();
assert_eq!(distance, 9.0);
}
#[test]
fn measured_geometry() {
let pt1 = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
let pt2 = SFCGeometry::new("POINTM(1.0 1.0 2.0)").unwrap();
assert_eq!(pt1.is_measured().unwrap(), false);
assert_eq!(pt2.is_measured().unwrap(), true);
}
#[test]
fn area() {
let polygon = SFCGeometry::new("POLYGON((1 1, 3 1, 4 4, 1 3, 1 1))").unwrap();
assert_eq!(polygon.area().unwrap(), 6.0);
}
#[test]
fn area_3d() {
let polygon = SFCGeometry::new("POLYGON((1 1 1, 3 1 1, 4 4 1, 1 3 1, 1 1 1))").unwrap();
assert_ulps_eq!(polygon.area_3d().unwrap(), 6.0);
}
#[test]
fn volume() {
let cube = SFCGeometry::new(
"SOLID((((0 0 0,0 0 1,0 1 1,0 1 0,0 0 0)),\
((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0)),\
((0 0 0,1 0 0,1 0 1,0 0 1,0 0 0)),\
((1 0 0,1 1 0,1 1 1,1 0 1,1 0 0)),\
((0 0 1,1 0 1,1 1 1,0 1 1,0 0 1)),\
((0 1 0,0 1 1,1 1 1,1 1 0,0 1 0))))",
)
.unwrap();
assert_eq!(cube.volume().unwrap(), 1.);
}
#[test]
fn volume_on_not_volume_geometry() {
let surface = SFCGeometry::new(
"POLYHEDRALSURFACE Z \
(((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),\
((0 0 0, 0 1 0, 0 1 1, 0 0 1, 0 0 0)),\
((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),\
((1 1 1, 1 0 1, 0 0 1, 0 1 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0, 1 1 0, 1 1 1)),\
((1 1 1, 1 1 0, 0 1 0, 0 1 1, 1 1 1)))",
)
.unwrap();
assert_eq!(surface.volume().is_err(), true);
}
#[test]
fn predicates() {
let pt = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
assert_eq!(pt.is_valid().unwrap(), true);
assert_eq!(pt.is_3d().unwrap(), false);
assert_eq!(pt.is_empty().unwrap(), false);
assert_eq!(
pt.is_planar().err().unwrap().to_string(),
"SFCGAL error: is_planar() only applies to polygons",
);
let linestring_3d = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
assert_eq!(linestring_3d.is_valid().unwrap(), true);
assert_eq!(linestring_3d.is_3d().unwrap(), true);
assert_eq!(linestring_3d.is_empty().unwrap(), false);
assert_eq!(
linestring_3d.is_planar().err().unwrap().to_string(),
"SFCGAL error: is_planar() only applies to polygons",
);
let empty_geom = SFCGeometry::new("LINESTRING EMPTY").unwrap();
assert_eq!(empty_geom.is_valid().unwrap(), true);
assert_eq!(empty_geom.is_3d().unwrap(), false);
assert_eq!(empty_geom.is_empty().unwrap(), true);
assert_eq!(
linestring_3d.is_planar().err().unwrap().to_string(),
"SFCGAL error: is_planar() only applies to polygons",
);
let polyg = SFCGeometry::new("POLYGON((1 1, 3 1, 4 4, 1 3, 1 1))").unwrap();
assert_eq!(polyg.is_valid().unwrap(), true);
assert_eq!(polyg.is_3d().unwrap(), false);
assert_eq!(polyg.is_empty().unwrap(), false);
assert_eq!(polyg.is_planar().unwrap(), true);
assert_eq!(pt.intersects(&polyg).unwrap(), true);
assert_eq!(pt.intersects_3d(&linestring_3d).unwrap(), false);
}
#[test]
fn validity_detail_on_valid_geom() {
let line = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
assert_eq!(line.is_valid().unwrap(), true);
assert_eq!(line.validity_detail().unwrap(), None);
}
#[test]
fn validity_detail_on_invalid_geom() {
let surface = SFCGeometry::new(
"POLYHEDRALSURFACE Z \
(((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),\
((0 0 0, 0 1 0, 0 1 1, 0 0 1, 0 0 0)),\
((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),\
((1 1 1, 1 0 1, 0 0 1, 0 1 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0, 1 1 0, 1 1 1)),\
((1 1 1, 1 1 0, 0 1 0, 0 1 1, 1 1 1)))",
)
.unwrap();
assert_eq!(surface.is_valid().unwrap(), false);
assert_eq!(
surface.validity_detail().unwrap(),
Some(String::from("inconsistent orientation of PolyhedralSurface detected at edge 3 (4-7) of polygon 5")),
);
}
#[test]
fn straight_skeleton() {
let geom = SFCGeometry::new("POLYGON((0 0,1 0,1 1,0 1,0 0))").unwrap();
let result = geom.straight_skeleton().unwrap();
let wkt = result.to_wkt_decim(1).unwrap();
assert_eq!(
wkt,
"MULTILINESTRING((-0.0 -0.0,0.5 0.5),(1.0 -0.0,0.5 0.5),(1.0 1.0,0.5 0.5),(-0.0 1.0,0.5 0.5))",
);
}
#[test]
fn straight_skeleton_distance_in_m() {
let geom = SFCGeometry::new("POLYGON((0 0,1 0,1 1,0 1,0 0))").unwrap();
let result = geom.straight_skeleton_distance_in_m().unwrap();
let wkt = result.to_wkt_decim(1).unwrap();
assert_eq!(
wkt,
"MULTILINESTRING M(\
(-0.0 -0.0 0.0,0.5 0.5 0.5),\
(1.0 -0.0 0.0,0.5 0.5 0.5),\
(1.0 1.0 0.0,0.5 0.5 0.5),\
(-0.0 1.0 0.0,0.5 0.5 0.5))",
);
}
#[test]
fn tesselate() {
let geom = SFCGeometry::new("POLYGON((0.0 0.0,1.0 0.0,1.0 1.0,0.0 1.0,0.0 0.0))").unwrap();
let result = geom.tesselate().unwrap();
let output_wkt = result.to_wkt_decim(1).unwrap();
assert_eq!(
output_wkt,
"TIN(((0.0 1.0,1.0 0.0,1.0 1.0,0.0 1.0)),((0.0 1.0,0.0 0.0,1.0 0.0,0.0 1.0)))",
);
}
#[test]
fn offset_polygon() {
let geom = SFCGeometry::new("POLYGON((0.0 0.0,1.0 0.0,1.0 1.0,0.0 1.0,0.0 0.0))").unwrap();
let buff = geom.offset_polygon(1.).unwrap();
assert!(buff.is_valid().unwrap());
assert!(!buff.is_empty().unwrap());
}
#[test]
fn extrude_polygon() {
let geom = SFCGeometry::new("POLYGON((0.0 0.0,1.0 0.0,1.0 1.0,0.0 1.0,0.0 0.0))").unwrap();
let extr = geom.extrude(0., 0., 1.).unwrap();
assert!(extr.is_valid().unwrap());
assert!(!extr.is_empty().unwrap());
assert_eq!(extr._type().unwrap(), GeomType::Solid);
}
#[test]
fn tesselate_invariant_geom() {
let input_wkt = String::from("POINT(1.0 1.0)");
let pt = SFCGeometry::new(&input_wkt).unwrap();
let result = pt.tesselate().unwrap();
let output_wkt = result.to_wkt_decim(1).unwrap();
assert_eq!(input_wkt, output_wkt);
}
#[test]
fn line_substring() {
let g = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
let result = g.line_substring(-0.2, 0.2).unwrap();
assert_eq!(
result.to_wkt_decim(1).unwrap(),
"LINESTRING(2.8 1.8 1.8,8.2 1.2 1.9)"
);
// With "start" or "end" point not in [-1; 1]
assert_eq!(
g.line_substring(-2., 0.2).err().unwrap().to_string(),
"Obtained null pointer when creating geometry: SFCGAL::algorithm::lineSubstring: start value out of range."
);
}
#[test]
fn difference_3d() {
let cube1 = SFCGeometry::new(
"
SOLID((((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),\
((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),\
((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),\
((1 1 1, 0 1 1, 0 0 1, 1 0 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0, 1 1 0, 1 1 1)),\
((1 1 1, 1 1 0, 0 1 0, 0 1 1, 1 1 1))))",
)
.unwrap();
let cube2 = SFCGeometry::new(
"
SOLID((((0 0 0.5, 0 1 0.5, 1 1 0.5, 1 0 0.5, 0 0 0.5)),\
((0 0 0.5, 0 0 1, 0 1 1, 0 1 0.5, 0 0 0.5)),\
((0 0 0.5, 1 0 0.5, 1 0 1, 0 0 1, 0 0 0.5)),\
((1 1 1, 0 1 1, 0 0 1, 1 0 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0.5, 1 1 0.5, 1 1 1)),\
((1 1 1, 1 1 0.5, 0 1 0.5, 0 1 1, 1 1 1))))",
)
.unwrap();
let diff = cube1.difference_3d(&cube2).unwrap();
assert_eq!(diff.is_valid().unwrap(), true);
assert_ulps_eq!(diff.volume().unwrap(), 0.5);
}
#[test]
fn intersection_3d() {
let cube1 = SFCGeometry::new(
"
SOLID((((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),\
((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),\
((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),\
((1 1 1, 0 1 1, 0 0 1, 1 0 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0, 1 1 0, 1 1 1)),\
((1 1 1, 1 1 0, 0 1 0, 0 1 1, 1 1 1))))",
)
.unwrap();
let cube2 = SFCGeometry::new(
"
SOLID((((0 0 0.5, 0 1 0.5, 1 1 0.5, 1 0 0.5, 0 0 0.5)),\
((0 0 0.5, 0 0 1, 0 1 1, 0 1 0.5, 0 0 0.5)),\
((0 0 0.5, 1 0 0.5, 1 0 1, 0 0 1, 0 0 0.5)),\
((1 1 1, 0 1 1, 0 0 1, 1 0 1, 1 1 1)),\
((1 1 1, 1 0 1, 1 0 0.5, 1 1 0.5, 1 1 1)),\
((1 1 1, 1 1 0.5, 0 1 0.5, 0 1 1, 1 1 1))))",
)
.unwrap();
let diff = cube1.intersection_3d(&cube2).unwrap();
assert_eq!(diff.is_valid().unwrap(), true);
assert_ulps_eq!(diff.volume().unwrap(), 0.5);
}
#[test]
fn create_collection_empty() {
let g = SFCGeometry::create_collection(&mut []).unwrap();
assert_eq!(g.to_wkt_decim(1).unwrap(), "GEOMETRYCOLLECTION EMPTY",);
}
#[test]
fn create_collection_heterogenous() {
let a = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
let b = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
let g = SFCGeometry::create_collection(&mut [a, b]).unwrap();
assert_eq!(
g.to_wkt_decim(1).unwrap(),
"GEOMETRYCOLLECTION(POINT(1.0 1.0),LINESTRING(10.0 1.0 2.0,1.0 2.0 1.7))",
);
}
#[test]
fn create_collection_multipoint_from_points() {
let a = SFCGeometry::new("POINT(1.0 1.0)").unwrap();
let b = SFCGeometry::new("POINT(2.0 2.0)").unwrap();
let g = SFCGeometry::create_collection(&mut [a, b]).unwrap();
assert_eq!(
g.to_wkt_decim(1).unwrap(),
"MULTIPOINT((1.0 1.0),(2.0 2.0))",
);
}
#[test]
fn create_collection_multilinestring_from_linestrings() {
let a = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
let b = SFCGeometry::new("LINESTRING(10.0 1.0 2.0, 1.0 2.0 1.7)").unwrap();
let g = SFCGeometry::create_collection(&mut [a, b]).unwrap();
assert_eq!(
g.to_wkt_decim(1).unwrap(),
"MULTILINESTRING((10.0 1.0 2.0,1.0 2.0 1.7),(10.0 1.0 2.0,1.0 2.0 1.7))",
);
}
#[test]
fn create_collection_multisolid_from_solids() {
let a = SFCGeometry::new(
"SOLID((((0 0 0,0 0 1,0 1 1,0 1 0,0 0 0)),\
((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0)),\
((0 0 0,1 0 0,1 0 1,0 0 1,0 0 0)),\
((1 0 0,1 1 0,1 1 1,1 0 1,1 0 0)),\
((0 0 1,1 0 1,1 1 1,0 1 1,0 0 1)),\
((0 1 0,0 1 1,1 1 1,1 1 0,0 1 0))))",
)
.unwrap();
let b = SFCGeometry::new(
"SOLID((((0 0 0,0 0 1,0 1 1,0 1 0,0 0 0)),\
((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0)),\
((0 0 0,1 0 0,1 0 1,0 0 1,0 0 0)),\
((1 0 0,1 1 0,1 1 1,1 0 1,1 0 0)),\
((0 0 1,1 0 1,1 1 1,0 1 1,0 0 1)),\
((0 1 0,0 1 1,1 1 1,1 1 0,0 1 0))))",
)
.unwrap();
let g = SFCGeometry::create_collection(&mut [a, b]).unwrap();
assert_eq!(
g.to_wkt_decim(1).unwrap(),
"MULTISOLID(\
((\
((0.0 0.0 0.0,0.0 0.0 1.0,0.0 1.0 1.0,0.0 1.0 0.0,0.0 0.0 0.0)),\
((0.0 0.0 0.0,0.0 1.0 0.0,1.0 1.0 0.0,1.0 0.0 0.0,0.0 0.0 0.0)),\
((0.0 0.0 0.0,1.0 0.0 0.0,1.0 0.0 1.0,0.0 0.0 1.0,0.0 0.0 0.0)),\
((1.0 0.0 0.0,1.0 1.0 0.0,1.0 1.0 1.0,1.0 0.0 1.0,1.0 0.0 0.0)),\
((0.0 0.0 1.0,1.0 0.0 1.0,1.0 1.0 1.0,0.0 1.0 1.0,0.0 0.0 1.0)),\
((0.0 1.0 0.0,0.0 1.0 1.0,1.0 1.0 1.0,1.0 1.0 0.0,0.0 1.0 0.0))\
)),\
((\
((0.0 0.0 0.0,0.0 0.0 1.0,0.0 1.0 1.0,0.0 1.0 0.0,0.0 0.0 0.0)),\
((0.0 0.0 0.0,0.0 1.0 0.0,1.0 1.0 0.0,1.0 0.0 0.0,0.0 0.0 0.0)),\
((0.0 0.0 0.0,1.0 0.0 0.0,1.0 0.0 1.0,0.0 0.0 1.0,0.0 0.0 0.0)),\
((1.0 0.0 0.0,1.0 1.0 0.0,1.0 1.0 1.0,1.0 0.0 1.0,1.0 0.0 0.0)),\
((0.0 0.0 1.0,1.0 0.0 1.0,1.0 1.0 1.0,0.0 1.0 1.0,0.0 0.0 1.0)),\
((0.0 1.0 0.0,0.0 1.0 1.0,1.0 1.0 1.0,1.0 1.0 0.0,0.0 1.0 0.0))\
))\
)",
);
}
}
| 40.37033 | 122 | 0.581757 |
0a957e8efa7638c60b202709d367dca02b014ad8 | 2,885 | //! Generation of seed points for field line tracing.
pub mod criterion;
pub mod manual;
pub mod slice;
pub mod volume;
use crate::{
geometry::{Idx3, Point3},
grid::Grid3,
num::BFloat,
};
use rayon::prelude::*;
/// Floating-point precision to use for seeding.
#[allow(non_camel_case_types)]
pub type fsd = f32;
/// Defines the properties of a 3D seed point generator.
pub trait Seeder3:
IntoIterator<Item = Point3<fsd>> + IntoParallelIterator<Item = Point3<fsd>>
{
/// Returns the number of seed points that will be produced by the seeder.
fn number_of_points(&self) -> usize;
/// Filters the seed points using the given predicate.
fn retain_points<P>(&mut self, predicate: P)
where
P: FnMut(&Point3<fsd>) -> bool;
/// Creates a list of seed indices from the seed points by looking up the grid cells
/// of the given grid containing the seed points.
fn to_index_seeder<F, G>(&self, grid: &G) -> Vec<Idx3<usize>>
where
F: BFloat,
G: Grid3<F>;
}
/// Defines the properties of a 3D seed index generator.
pub trait IndexSeeder3:
IntoIterator<Item = Idx3<usize>> + IntoParallelIterator<Item = Idx3<usize>>
{
/// Returns the number of seed indices that will be produced by the seeder.
fn number_of_indices(&self) -> usize;
/// Filters the seed indices using the given predicate.
fn retain_indices<P>(&mut self, predicate: P)
where
P: FnMut(&Idx3<usize>) -> bool;
/// Creates a list of seed points from the seed indices by indexing the center coordinates
/// of the given grid.
fn to_point_seeder<F, G>(&self, grid: &G) -> Vec<Point3<fsd>>
where
F: BFloat,
G: Grid3<F>;
}
// Let a vector of points work as a seeder.
impl Seeder3 for Vec<Point3<fsd>> {
fn number_of_points(&self) -> usize {
self.len()
}
fn retain_points<P>(&mut self, predicate: P)
where
P: FnMut(&Point3<fsd>) -> bool,
{
self.retain(predicate);
}
fn to_index_seeder<F, G>(&self, grid: &G) -> Vec<Idx3<usize>>
where
F: BFloat,
G: Grid3<F>,
{
self.iter()
.map(|point| {
grid.find_closest_grid_cell(&Point3::from(point))
.expect_inside_or_moved()
})
.collect()
}
}
// Let a vector of indices work as a seeder.
impl IndexSeeder3 for Vec<Idx3<usize>> {
fn number_of_indices(&self) -> usize {
self.len()
}
fn retain_indices<P>(&mut self, predicate: P)
where
P: FnMut(&Idx3<usize>) -> bool,
{
self.retain(predicate);
}
fn to_point_seeder<F, G>(&self, grid: &G) -> Vec<Point3<fsd>>
where
F: BFloat,
G: Grid3<F>,
{
self.par_iter()
.map(|indices| Point3::from(&grid.centers().point(indices)))
.collect()
}
}
| 26.46789 | 94 | 0.605199 |
e5d89f692ab2c0f0889d65e693d4aced743a0650 | 1,119 | use std::prelude::v1::*;
use {crate::result::Result, regex::Regex, serde::Serialize, thiserror::Error};
#[derive(Error, Serialize, Debug, PartialEq)]
pub enum StringExtError {
#[error("unreachable literal unary operation")]
UnreachablePatternParsing,
}
pub trait StringExt {
fn like(&self, pattern: &str, case_sensitive: bool) -> Result<bool>;
}
impl StringExt for String {
fn like(&self, pattern: &str, case_sensitive: bool) -> Result<bool> {
let (match_string, match_pattern) = match case_sensitive {
true => (self.to_string(), pattern.to_string()),
false => {
let lowercase_string = self.to_lowercase();
let lowercase_pattern = pattern.to_lowercase();
(lowercase_string, lowercase_pattern)
}
};
Ok(Regex::new(&format!(
"^{}$",
regex::escape(match_pattern.as_str())
.replace("%", ".*")
.replace("_", ".")
))
.map_err(|_| StringExtError::UnreachablePatternParsing)?
.is_match(match_string.as_str()))
}
}
| 30.243243 | 78 | 0.581769 |
0995733ba91b4a9ecc0e3c5dfb0cdf83c2972e47 | 7,330 | extern crate rand;
extern crate time;
extern crate bincode;
extern crate image;
extern crate rayon;
#[cfg(feature = "gui")]
extern crate sdl2;
#[macro_use]
extern crate serde_derive;
pub mod camera;
pub mod geometry;
pub mod light;
pub mod material;
pub mod math;
pub mod mesh;
pub mod primitive;
pub mod scene;
pub mod texture;
pub mod obj;
mod distribution;
mod integrator;
mod warp;
mod bvh;
use rand::{Rng, SeedableRng};
use time::PreciseTime;
use rayon::prelude::*;
use math::*;
use scene::*;
use camera::*;
use texture::*;
use integrator::estimate_radiance;
static OUTPUT_FILE: &'static str = "/tmp/image.ppm";
pub fn render(scene: Scene, camera: Camera, spp: u32) {
let (width, height) = camera.resolution();
let mut sum_rad = vec![Vec3::zero(); width * height];
println!("Start rendering with {} samples per pixel...", spp);
let start = PreciseTime::now();
sum_rad.par_chunks_mut(width).enumerate().for_each(|(y, row)| {
let mut rng: rand::XorShiftRng = rand::random();
for (x, p) in row.iter_mut().enumerate() {
for _ in 0..spp {
let ray = camera.make_ray((x, y), rng.gen(), rng.gen());
let v = estimate_radiance(&scene, ray, &mut rng);
if !v.has_nan() {
*p += v;
}
}
}
});
let end = PreciseTime::now();
let tot_s = start.to(end).num_milliseconds() as f32 / 1000.0;
println!("Rendered {} spp in {:.3}s ({:.3}s per sample)", spp, tot_s, tot_s / spp as f32);
write_ppm_srgb(OUTPUT_FILE, width, height, camera.tonemap, sum_rad.iter().map(|&sr| sr / spp as f32));
}
pub fn render_preview(scene: Scene, camera: Camera) {
#[cfg(feature = "gui")]
render_preview_gui(scene, camera);
#[cfg(not(feature = "gui"))]
render_preview_file(scene, camera);
}
#[cfg(feature = "gui")]
pub fn render_preview_gui(scene: Scene, camera: Camera) {
use sdl2::pixels::PixelFormatEnum;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
let mut camera = camera;
let (width, height) = camera.resolution();
let mut sum_rad = vec![Vec3::zero(); width * height];
let mut spp = 0;
let mut rngs: Vec<rand::XorShiftRng> = Vec::new();
for _ in 0..height {
rngs.push(rand::random());
}
let mut tonemapped: Vec<u8> = vec![0; width * height * 3];
// init a SDL window and a texture
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("tracing", width as u32, height as u32).build().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let texture_creator = canvas.texture_creator();
let mut texture = texture_creator.create_texture_streaming(PixelFormatEnum::RGB24, width as u32, height as u32).unwrap();
println!("Start rendering...");
let mut start = PreciseTime::now();
'rendering: loop {
// render a new frame
sum_rad.par_chunks_mut(width).enumerate().zip(rngs.par_iter_mut()).for_each(|((y, row), rng)| {
for (x, p) in row.iter_mut().enumerate() {
let ray = camera.make_ray((x, y), rng.gen(), rng.gen());
let v = estimate_radiance(&scene, ray, rng);
if !v.has_nan() {
*p += v;
}
}
});
spp += 1;
//println!("{} spp", spp);
// tonemap the current data and display it
sum_rad.par_iter().zip(tonemapped.par_chunks_mut(3)).for_each(|(&sr, tm)| {
let v = (camera.tonemap)(sr / spp as f32).map(|x| x * 255.0 + 0.5);
tm[0] = v.x as u8;
tm[1] = v.y as u8;
tm[2] = v.z as u8;
});
texture.update(None, &tonemapped, width * 3).unwrap();
canvas.copy(&texture, None, None).unwrap();
canvas.present();
canvas.window_mut().set_title(&format!("tracing - {} spp", spp)).unwrap();
// process sdl events
for event in event_pump.poll_iter() {
match event {
Event::Quit {..}
//| Event::KeyDown { keycode: Some(Keycode::Escape), .. }
| Event::KeyDown { keycode: Some(Keycode::Q), .. } => {
break 'rendering;
}
//Event::Window { win_event: sdl2::event::WindowEvent::Exposed, .. } => {
// canvas.copy(&texture, None, None).unwrap();
// canvas.present();
//}
Event::MouseButtonDown { x, y, .. } => {
let ray = camera.make_ray((x as usize, y as usize), (0.0, 0.0), (0.0, 0.0));
match scene.intersect(ray) {
Some(Hit::Scatterer(its, _)) => {
println!("restarting with focal distance = {}", its.distance);
camera.set_focus_dist(Some(its.distance));
for sr in &mut sum_rad {
*sr = Vec3::zero();
}
spp = 0;
start = PreciseTime::now();
}
_ => println!("no intersection"),
}
}
_ => {}
};
}
}
let end = PreciseTime::now();
let tot_s = start.to(end).num_milliseconds() as f32 / 1000.0;
println!("Rendered {} spp in {:.3}s ({:.3}s per sample)", spp, tot_s, tot_s / spp as f32);
write_ppm_srgb(OUTPUT_FILE, width, height, camera.tonemap, sum_rad.iter().map(|&sr| sr / spp as f32));
}
pub fn render_preview_file(scene: Scene, camera: Camera) {
let camera = camera;
let (width, height) = camera.resolution();
let mut sum_rad = vec![Vec3::zero(); width * height];
let mut spp = 0;
let mut rngs: Vec<rand::XorShiftRng> = Vec::new();
for _ in 0..height {
rngs.push(rand::random());
}
let mut tonemapped: Vec<u8> = vec![0; width * height * 3];
println!("Start rendering...");
let start = PreciseTime::now();
const SPP_STEP: usize = 16;
'rendering: loop {
// render a new frame
sum_rad.par_chunks_mut(width).enumerate().zip(rngs.par_iter_mut()).for_each(|((y, row), rng)| {
let mut local_rng = rng.clone();
for (x, p) in row.iter_mut().enumerate() {
for _ in 0..SPP_STEP {
let ray = camera.make_ray((x, y), local_rng.gen(), local_rng.gen());
let v = estimate_radiance(&scene, ray, &mut local_rng);
if !v.has_nan() {
*p += v;
}
}
}
*rng = local_rng.clone();
});
spp += SPP_STEP;
// tonemap the current data and dump it
sum_rad.par_iter().zip(tonemapped.par_chunks_mut(3)).for_each(|(&sr, tm)| {
let v = (camera.tonemap)(sr / spp as f32).map(|x| x * 255.0 + 0.5);
tm[0] = v.x as u8;
tm[1] = v.y as u8;
tm[2] = v.z as u8;
});
write_ppm_raw(OUTPUT_FILE, width, height, &tonemapped);
let end = PreciseTime::now();
let tot_s = start.to(end).num_milliseconds() as f32 / 1000.0;
println!("Rendered {} spp in {:.3}s ({:.3}s per sample)", spp, tot_s, tot_s / spp as f32);
}
}
pub fn render_seq(scene: Scene, camera: Camera, spp: u32) {
let (width, height) = camera.resolution();
let mut sum_rad = vec![Vec3::zero(); width * height];
//let mut rng: rand::XorShiftRng = rand::random();
let mut rng = rand::XorShiftRng::from_seed([0u32, 1u32, 2u32, 3u32]);
println!("Start rendering with {} samples per pixel...", spp);
let start = PreciseTime::now();
sum_rad.chunks_mut(width).enumerate().for_each(|(y, row)| {
for (x, p) in row.iter_mut().enumerate() {
for _ in 0..spp {
let ray = camera.make_ray((x, y), rng.gen(), rng.gen());
let v = estimate_radiance(&scene, ray, &mut rng);
if !v.has_nan() {
*p += v;
}
}
}
});
let end = PreciseTime::now();
let tot_s = start.to(end).num_milliseconds() as f32 / 1000.0;
println!("Rendered {} spp in {:.3}s ({:.3}s per sample)", spp, tot_s, tot_s / spp as f32);
write_ppm_srgb(OUTPUT_FILE, width, height, camera.tonemap, sum_rad.iter().map(|&sr| sr / spp as f32));
}
| 29.676113 | 122 | 0.627831 |
cc567005b62dea4cca25cb58f264bd9a6d8a281d | 35,713 | //! Variant conversions
//!
//! This module contains the trait [`VariantExt`] and the types [`Variant`], [`VtEmpty`], [`VtNull`].
//!
//! It implements [`VariantExt`] for many built in types to enable conversions to VARIANT.
//!
//! [`VariantExt`]: trait.VariantExt.html
//! [`Variant`]: struct.Variant.html
//! [`VtEmpty`]: struct.VtEmpty.html
//! [`VtNull`]: struct.VtNull.html
/*
///
/// Reference:
/// typedef struct tagVARIANT {
/// union {
/// struct {
/// VARTYPE vt;
/// WORD wReserved1;
/// WORD wReserved2;
/// WORD wReserved3;
/// union {
/// LONGLONG llVal;
/// LONG lVal;
/// BYTE bVal;
/// SHORT iVal;
/// FLOAT fltVal;
/// DOUBLE dblVal;
/// VARIANT_BOOL boolVal;
/// SCODE scode;
/// CY cyVal;
/// DATE date;
/// BSTR bstrVal;
/// IUnknown *punkVal;
/// IDispatch *pdispVal;
/// SAFEARRAY *parray;
/// BYTE *pbVal;
/// SHORT *piVal;
/// LONG *plVal;
/// LONGLONG *pllVal;
/// FLOAT *pfltVal;
/// DOUBLE *pdblVal;
/// VARIANT_BOOL *pboolVal;
/// SCODE *pscode;
/// CY *pcyVal;
/// DATE *pdate;
/// BSTR *pbstrVal;
/// IUnknown **ppunkVal;
/// IDispatch **ppdispVal;
/// SAFEARRAY **pparray;
/// VARIANT *pvarVal;
/// PVOID byref;
/// CHAR cVal;
/// USHORT uiVal;
/// ULONG ulVal;
/// ULONGLONG ullVal;
/// INT intVal;
/// UINT uintVal;
/// DECIMAL *pdecVal;
/// CHAR *pcVal;
/// USHORT *puiVal;
/// ULONG *pulVal;
/// ULONGLONG *pullVal;
/// INT *pintVal;
/// UINT *puintVal;
/// struct {
/// PVOID pvRecord;
/// IRecordInfo *pRecInfo;
/// } __VARIANT_NAME_4;
/// } __VARIANT_NAME_3;
/// } __VARIANT_NAME_2;
/// DECIMAL decVal;
/// } __VARIANT_NAME_1;
/// } VARIANT;*/
/*
* VARENUM usage key,
*
* * [V] - may appear in a VARIANT
* * [T] - may appear in a TYPEDESC
* * [P] - may appear in an OLE property set
* * [S] - may appear in a Safe Array
*
*
* VT_EMPTY [V] [P] nothing
* VT_NULL [V] [P] SQL style Null
* VT_I2 [V][T][P][S] 2 byte signed int
* VT_I4 [V][T][P][S] 4 byte signed int
* VT_R4 [V][T][P][S] 4 byte real
* VT_R8 [V][T][P][S] 8 byte real
* VT_CY [V][T][P][S] currency
* VT_DATE [V][T][P][S] date
* VT_BSTR [V][T][P][S] OLE Automation string
* VT_DISPATCH [V][T] [S] IDispatch *
* VT_ERROR [V][T][P][S] SCODE
* VT_BOOL [V][T][P][S] True=-1, False=0
* VT_VARIANT [V][T][P][S] VARIANT *
* VT_UNKNOWN [V][T] [S] IUnknown *
* VT_DECIMAL [V][T] [S] 16 byte fixed point
* VT_RECORD [V] [P][S] user defined type
* VT_I1 [V][T][P][s] signed char
* VT_UI1 [V][T][P][S] unsigned char
* VT_UI2 [V][T][P][S] unsigned short
* VT_UI4 [V][T][P][S] ULONG
* VT_I8 [T][P] signed 64-bit int
* VT_UI8 [T][P] unsigned 64-bit int
* VT_INT [V][T][P][S] signed machine int
* VT_UINT [V][T] [S] unsigned machine int
* VT_INT_PTR [T] signed machine register size width
* VT_UINT_PTR [T] unsigned machine register size width
* VT_VOID [T] C style void
* VT_HRESULT [T] Standard return type
* VT_PTR [T] pointer type
* VT_SAFEARRAY [T] (use VT_ARRAY in VARIANT)
* VT_CARRAY [T] C style array
* VT_USERDEFINED [T] user defined type
* VT_LPSTR [T][P] null terminated string
* VT_LPWSTR [T][P] wide null terminated string
* VT_FILETIME [P] FILETIME
* VT_BLOB [P] Length prefixed bytes
* VT_STREAM [P] Name of the stream follows
* VT_STORAGE [P] Name of the storage follows
* VT_STREAMED_OBJECT [P] Stream contains an object
* VT_STORED_OBJECT [P] Storage contains an object
* VT_VERSIONED_STREAM [P] Stream with a GUID version
* VT_BLOB_OBJECT [P] Blob contains an object
* VT_CF [P] Clipboard format
* VT_CLSID [P] A Class ID
* VT_VECTOR [P] simple counted array
* VT_ARRAY [V] SAFEARRAY*
* VT_BYREF [V] void* for local use
* VT_BSTR_BLOB Reserved for system use
*/
use std::marker::PhantomData;
use std::mem;
use std::ptr::{NonNull, null_mut};
use rust_decimal::Decimal;
use widestring::U16String;
use winapi::ctypes::c_void;
use winapi::shared::wtypes::{
CY, DATE, DECIMAL,
VARIANT_BOOL,
VT_ARRAY,
VT_BSTR,
VT_BOOL,
VT_BYREF,
VT_CY,
VT_DATE,
VT_DECIMAL,
VT_DISPATCH,
VT_EMPTY,
VT_ERROR,
VT_I1,
VT_I2,
VT_I4,
VT_I8,
VT_INT,
VT_NULL,
VT_R4,
VT_R8,
//VT_RECORD,
VT_UI1,
VT_UI2,
VT_UI4,
VT_UI8,
VT_UINT,
VT_UNKNOWN,
VT_VARIANT,
};
use winapi::shared::wtypesbase::SCODE;
use winapi::um::oaidl::{IDispatch, __tagVARIANT, SAFEARRAY, VARIANT, VARIANT_n3, VARIANT_n1};
use winapi::um::oleauto::VariantClear;
use winapi::um::unknwnbase::IUnknown;
use super::array::{SafeArrayElement, SafeArrayExt};
use super::bstr::BStringExt;
use super::errors::{IntoVariantError, FromVariantError};
use super::ptr::Ptr;
use super::types::{Date, DecWrapper, Currency, Int, SCode, UInt, VariantBool };
const VT_PUI1: u32 = VT_BYREF | VT_UI1;
const VT_PI2: u32 = VT_BYREF | VT_I2;
const VT_PI4: u32 = VT_BYREF | VT_I4;
const VT_PI8: u32 = VT_BYREF | VT_I8;
const VT_PUI8: u32 = VT_BYREF | VT_UI8;
const VT_PR4: u32 = VT_BYREF | VT_R4;
const VT_PR8: u32 = VT_BYREF | VT_R8;
const VT_PBOOL: u32 = VT_BYREF | VT_BOOL;
const VT_PERROR: u32 = VT_BYREF | VT_ERROR;
const VT_PCY: u32 = VT_BYREF | VT_CY;
const VT_PDATE: u32 = VT_BYREF | VT_DATE;
const VT_PBSTR: u32 = VT_BYREF | VT_BSTR;
const VT_PUNKNOWN: u32 = VT_BYREF | VT_UNKNOWN;
const VT_PDISPATCH: u32 = VT_BYREF | VT_DISPATCH;
const VT_PDECIMAL: u32 = VT_BYREF | VT_DECIMAL;
const VT_PI1: u32 = VT_BYREF | VT_I1;
const VT_PUI2: u32 = VT_BYREF | VT_UI2;
const VT_PUI4: u32 = VT_BYREF | VT_UI4;
const VT_PINT: u32 = VT_BYREF | VT_INT;
const VT_PUINT: u32 = VT_BYREF | VT_UINT;
/// Trait implemented to convert the type into a VARIANT
/// Do not implement this yourself without care.
pub trait VariantExt: Sized { //Would like Clone, but *mut IDispatch and *mut IUnknown don't implement them
/// VARTYPE constant value for the type
const VARTYPE: u32;
/// Call this associated function on a Ptr<VARIANT> to obtain a value T
fn from_variant(var: Ptr<VARIANT>) -> Result<Self, FromVariantError>;
/// Convert a value of type T into a Ptr<VARIANT>
fn into_variant(self) -> Result<Ptr<VARIANT>, IntoVariantError>;
}
/// Helper struct to wrap a VARIANT compatible type into a VT_VARIANT marked VARIANT
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Variant<T: VariantExt>(T);
/// `Variant<T: VariantExt>` type to wrap an impl of `VariantExt` - creates a VARIANT of VT_VARIANT
/// which wraps an inner variant that points to T.
impl<T: VariantExt> Variant<T> {
/// default constructor
pub fn new(t: T) -> Variant<T> {
Variant(t)
}
/// Get access to the inner value and the Variant is consumed
pub fn unwrap(self) -> T {
self.0
}
/// Borrow reference to inner value
pub fn borrow(&self) -> &T {
&self.0
}
/// Borrow mutable reference to inner value
pub fn borrow_mut(&mut self) -> &mut T {
&mut self.0
}
/// Converts the `Variant<T>` into a `Ptr<VARIANT>`
/// Returns `Result<Ptr<VARIANT>, IntoVariantError>`
pub fn into_variant(self) -> Result<Ptr<VARIANT>, IntoVariantError> {
#[allow(unused_mut)]
let mut n3: VARIANT_n3 = unsafe {mem::zeroed()};
let mut n1: VARIANT_n1 = unsafe {mem::zeroed()};
let var = self.0.into_variant()?;
let var = var.as_ptr();
unsafe {
let n_ptr = n3.pvarVal_mut();
*n_ptr = var;
};
let tv = __tagVARIANT { vt: <Self as VariantExt>::VARTYPE as u16,
wReserved1: 0,
wReserved2: 0,
wReserved3: 0,
n3: n3};
unsafe {
let n_ptr = n1.n2_mut();
*n_ptr = tv;
};
let var = Box::new(VARIANT{ n1: n1 });
Ok(Ptr::with_checked(Box::into_raw(var)).unwrap())
}
/// Converts `Ptr<VARIANT>` into `Variant<T>`
/// Returns `Result<Variant<T>>, FromVariantError>`
pub fn from_variant(var: Ptr<VARIANT>) -> Result<Variant<T>, FromVariantError> {
let var = var.as_ptr();
let mut _var_d = VariantDestructor::new(var);
let mut n1 = unsafe {(*var).n1};
let n3 = unsafe { n1.n2_mut().n3 };
let n_ptr = unsafe {
let n_ptr = n3.pvarVal();
*n_ptr
};
let pnn = match Ptr::with_checked(n_ptr) {
Some(nn) => nn,
None => return Err(FromVariantError::VariantPtrNull)
};
let t = T::from_variant(pnn).unwrap();
Ok(Variant(t))
}
}
impl<T: VariantExt> AsRef<T> for Variant<T> {
fn as_ref(&self) -> &T {
self.borrow()
}
}
impl<T: VariantExt> AsMut<T> for Variant<T> {
fn as_mut(&mut self) -> &mut T {
self.borrow_mut()
}
}
struct VariantDestructor {
inner: *mut VARIANT,
_marker: PhantomData<VARIANT>
}
impl VariantDestructor {
fn new(p: *mut VARIANT) -> VariantDestructor {
VariantDestructor {
inner: p,
_marker: PhantomData
}
}
}
impl Drop for VariantDestructor {
fn drop(&mut self) {
if self.inner.is_null() {
return;
}
unsafe { VariantClear(self.inner)};
unsafe { let _dtor = *self.inner;}
self.inner = null_mut();
}
}
macro_rules! variant_impl {
(
impl $(<$tn:ident : $tc:ident>)* VariantExt for $t:ty {
VARTYPE = $vt:expr ;
$n_name:ident, $un_n:ident, $un_n_mut:ident
from => {$from:expr}
into => {$into:expr}
}
) => {
impl $(<$tn: $tc>)* VariantExt for $t {
const VARTYPE: u32 = $vt;
fn from_variant(var: Ptr<VARIANT>) -> Result<Self, FromVariantError>{
let var = var.as_ptr();
let mut var_d = VariantDestructor::new(var);
#[allow(unused_mut)]
let mut n1 = unsafe {(*var).n1};
let vt = unsafe{n1.n2()}.vt;
if vt as u32 != Self::VARTYPE {
return Err(FromVariantError::VarTypeDoesNotMatch{expected: Self::VARTYPE, found: vt as u32})
}
let ret = variant_impl!(@read $n_name, $un_n, $from, n1);
var_d.inner = null_mut();
ret
}
fn into_variant(self) -> Result<Ptr<VARIANT>, IntoVariantError> {
#[allow(unused_mut)]
let mut n3: VARIANT_n3 = unsafe {mem::zeroed()};
let mut n1: VARIANT_n1 = unsafe {mem::zeroed()};
variant_impl!(@write $n_name, $un_n_mut, $into, n3, n1, self);
let tv = __tagVARIANT { vt: <Self as VariantExt>::VARTYPE as u16,
wReserved1: 0,
wReserved2: 0,
wReserved3: 0,
n3: n3};
unsafe {
let n_ptr = n1.n2_mut();
*n_ptr = tv;
};
let var = Box::new(VARIANT{ n1: n1 });
Ok(Ptr::with_checked(Box::into_raw(var)).unwrap())
}
}
};
(@read n3, $un_n:ident, $from:expr, $n1:ident) => {
{
let n3 = unsafe { $n1.n2_mut().n3 };
let ret = unsafe {
let n_ptr = n3.$un_n();
$from(n_ptr)
};
ret
}
};
(@read n1, $un_n:ident, $from:expr, $n1:ident) => {
{
let ret = unsafe {
let n_ptr = $n1.$un_n();
$from(n_ptr)
};
ret
}
};
(@write n3, $un_n_mut:ident, $into:expr, $n3:ident, $n1:ident, $slf:expr) => {
unsafe {
let n_ptr = $n3.$un_n_mut();
*n_ptr = $into($slf)?
}
};
(@write n1, $un_n_mut:ident, $into:expr, $n3:ident, $n1:ident, $slf:expr) => {
unsafe {
let n_ptr = $n1.$un_n_mut();
*n_ptr = $into($slf)?
}
};
}
variant_impl!{
impl VariantExt for i64 {
VARTYPE = VT_I8;
n3, llVal, llVal_mut
from => {|n_ptr: &i64| {Ok(*n_ptr)}}
into => {|slf: i64| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for i32 {
VARTYPE = VT_I4;
n3, lVal, lVal_mut
from => {|n_ptr: &i32| Ok(*n_ptr)}
into => {|slf: i32| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for u8 {
VARTYPE = VT_UI1;
n3, bVal, bVal_mut
from => {|n_ptr: &u8| Ok(*n_ptr)}
into => {|slf: u8| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for i16 {
VARTYPE = VT_I2;
n3, iVal, iVal_mut
from => {|n_ptr: &i16| Ok(*n_ptr)}
into => {|slf: i16| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for f32 {
VARTYPE = VT_R4;
n3, fltVal, fltVal_mut
from => {|n_ptr: &f32| Ok(*n_ptr)}
into => {|slf: f32| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for f64 {
VARTYPE = VT_R8;
n3, dblVal, dblVal_mut
from => {|n_ptr: &f64| Ok(*n_ptr)}
into => {|slf: f64| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for bool {
VARTYPE = VT_BOOL;
n3, boolVal, boolVal_mut
from => {|n_ptr: &VARIANT_BOOL| Ok(bool::from(VariantBool::from(*n_ptr)))}
into => {|slf: bool| -> Result<_, IntoVariantError> {
Ok(VARIANT_BOOL::from(VariantBool::from(slf)))
}}
}
}
variant_impl!{
impl VariantExt for SCode {
VARTYPE = VT_ERROR;
n3, scode, scode_mut
from => {|n_ptr: &SCODE| Ok(SCode::from(*n_ptr))}
into => {|slf: SCode| -> Result<_, IntoVariantError> {
Ok(i32::from(slf))
}}
}
}
variant_impl!{
impl VariantExt for Currency {
VARTYPE = VT_CY;
n3, cyVal, cyVal_mut
from => {|n_ptr: &CY| Ok(Currency::from(*n_ptr))}
into => {|slf: Currency| -> Result<_, IntoVariantError> {Ok(CY::from(slf))}}
}
}
variant_impl!{
impl VariantExt for Date {
VARTYPE = VT_DATE;
n3, date, date_mut
from => {|n_ptr: &DATE| Ok(Date::from(*n_ptr))}
into => {|slf: Date| -> Result<_, IntoVariantError> {Ok(DATE::from(slf))}}
}
}
variant_impl!{
impl VariantExt for String {
VARTYPE = VT_BSTR;
n3, bstrVal, bstrVal_mut
from => {|n_ptr: &*mut u16| {
let bstr = U16String::from_bstr(*n_ptr);
Ok(bstr.to_string_lossy())
}}
into => {|slf: String|{
let mut bstr = U16String::from_str(&slf);
match bstr.allocate_bstr(){
Ok(ptr) => Ok(ptr.as_ptr()),
Err(bse) => Err(IntoVariantError::from(bse))
}
}}
}
}
variant_impl!{
impl VariantExt for Ptr<IUnknown> {
VARTYPE = VT_UNKNOWN;
n3, punkVal, punkVal_mut
from => {|n_ptr: &* mut IUnknown| Ok(Ptr::with_checked(*n_ptr).unwrap())}
into => {|slf: Ptr<IUnknown>| -> Result<_, IntoVariantError> {Ok(slf.as_ptr())}}
}
}
variant_impl!{
impl VariantExt for Ptr<IDispatch> {
VARTYPE = VT_DISPATCH;
n3, pdispVal, pdispVal_mut
from => {|n_ptr: &*mut IDispatch| Ok(Ptr::with_checked(*n_ptr).unwrap())}
into => {|slf: Ptr<IDispatch>| -> Result<_, IntoVariantError> { Ok(slf.as_ptr()) }}
}
}
variant_impl!{
impl VariantExt for Box<u8> {
VARTYPE = VT_PUI1;
n3, pbVal, pbVal_mut
from => {|n_ptr: &* mut u8| Ok(Box::new(**n_ptr))}
into => {|slf: Box<u8>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<i16> {
VARTYPE = VT_PI2;
n3, piVal, piVal_mut
from => {|n_ptr: &* mut i16| Ok(Box::new(**n_ptr))}
into => {|slf: Box<i16>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<i32> {
VARTYPE = VT_PI4;
n3, plVal, plVal_mut
from => {|n_ptr: &* mut i32| Ok(Box::new(**n_ptr))}
into => {|slf: Box<i32>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<i64> {
VARTYPE = VT_PI8;
n3, pllVal, pllVal_mut
from => {|n_ptr: &* mut i64| Ok(Box::new(**n_ptr))}
into => {|slf: Box<i64>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<f32> {
VARTYPE = VT_PR4;
n3, pfltVal, pfltVal_mut
from => {|n_ptr: &* mut f32| Ok(Box::new(**n_ptr))}
into => {|slf: Box<f32>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<f64> {
VARTYPE = VT_PR8;
n3, pdblVal, pdblVal_mut
from => {|n_ptr: &* mut f64| Ok(Box::new(**n_ptr))}
into => {|slf: Box<f64>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<bool> {
VARTYPE = VT_PBOOL;
n3, pboolVal, pboolVal_mut
from => {
|n_ptr: &*mut VARIANT_BOOL| Ok(Box::new(bool::from(VariantBool::from(**n_ptr))))
}
into => {
|slf: Box<bool>|-> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new(VARIANT_BOOL::from(VariantBool::from(*slf)))))
}
}
}
}
variant_impl!{
impl VariantExt for Box<SCode> {
VARTYPE = VT_PERROR;
n3, pscode, pscode_mut
from => {|n_ptr: &*mut SCODE| Ok(Box::new(SCode::from(**n_ptr)))}
into => {|slf: Box<SCode>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new(i32::from(*slf))))
}}
}
}
variant_impl!{
impl VariantExt for Box<Currency> {
VARTYPE = VT_PCY;
n3, pcyVal, pcyVal_mut
from => { |n_ptr: &*mut CY| Ok(Box::new(Currency::from(**n_ptr))) }
into => {
|slf: Box<Currency>|-> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new(CY::from(*slf))))
}
}
}
}
variant_impl!{
impl VariantExt for Box<Date> {
VARTYPE = VT_PDATE;
n3, pdate, pdate_mut
from => { |n_ptr: &*mut f64| Ok(Box::new(Date::from(**n_ptr))) }
into => {
|slf: Box<Date>|-> Result<_, IntoVariantError> {
let bptr = Box::new(DATE::from(*slf));
Ok(Box::into_raw(bptr))
}
}
}
}
variant_impl!{
impl VariantExt for Box<String> {
VARTYPE = VT_PBSTR;
n3, pbstrVal, pbstrVal_mut
from => {|n_ptr: &*mut *mut u16| {
let bstr = U16String::from_bstr(**n_ptr);
Ok(Box::new(bstr.to_string_lossy()))
}}
into => {|slf: Box<String>| -> Result<_, IntoVariantError> {
let mut bstr = U16String::from_str(&*slf);
let bstr = Box::new(bstr.allocate_bstr().unwrap().as_ptr());
Ok(Box::into_raw(bstr))
}}
}
}
variant_impl! {
impl VariantExt for Box<Ptr<IUnknown>> {
VARTYPE = VT_PUNKNOWN;
n3, ppunkVal, ppunkVal_mut
from => {
|n_ptr: &*mut *mut IUnknown| {
match NonNull::new((**n_ptr).clone()) {
Some(nn) => Ok(Box::new(Ptr::new(nn))),
None => Err(FromVariantError::UnknownPtrNull)
}
}
}
into => {
|slf: Box<Ptr<IUnknown>>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new((*slf).as_ptr())))
}
}
}
}
variant_impl! {
impl VariantExt for Box<Ptr<IDispatch>> {
VARTYPE = VT_PDISPATCH;
n3, ppdispVal, ppdispVal_mut
from => {
|n_ptr: &*mut *mut IDispatch| {
match Ptr::with_checked((**n_ptr).clone()) {
Some(nn) => Ok(Box::new(nn)),
None => Err(FromVariantError::DispatchPtrNull)
}
}
}
into => {
|slf: Box<Ptr<IDispatch>>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new((*slf).as_ptr())))
}
}
}
}
variant_impl!{
impl<T: VariantExt> VariantExt for Variant<T> {
VARTYPE = VT_VARIANT;
n3, pvarVal, pvarVal_mut
from => {|n_ptr: &*mut VARIANT| {
let pnn = match Ptr::with_checked(*n_ptr) {
Some(nn) => nn,
None => return Err(FromVariantError::VariantPtrNull)
};
Variant::<T>::from_variant(pnn)
}}
into => {|slf: Variant<T>| -> Result<_, IntoVariantError> {
let pvar = slf.into_variant().unwrap();
Ok(pvar.as_ptr())
}}
}
}
variant_impl!{
impl<T: SafeArrayElement> VariantExt for Vec<T>{
VARTYPE = VT_ARRAY;
n3, parray, parray_mut
from => {
|n_ptr: &*mut SAFEARRAY| {
match ExactSizeIterator::<Item=T>::from_safearray(*n_ptr) {
Ok(sa) => Ok(sa),
Err(fsae) => Err(FromVariantError::from(fsae))
}
}
}
into => {
|slf: Vec<T>| -> Result<_, IntoVariantError> {
match slf.into_iter().into_safearray() {
Ok(psa) => {
Ok(psa.as_ptr())
},
Err(isae) => {
Err(IntoVariantError::from(isae))
}
}
}
}
}
}
variant_impl!{
impl VariantExt for Ptr<c_void> {
VARTYPE = VT_BYREF;
n3, byref, byref_mut
from => {|n_ptr: &*mut c_void| {
match Ptr::with_checked(*n_ptr) {
Some(nn) => Ok(nn),
None => Err(FromVariantError::CVoidPtrNull)
}
}}
into => {|slf: Ptr<c_void>| -> Result<_, IntoVariantError> {
Ok(slf.as_ptr())
}}
}
}
variant_impl!{
impl VariantExt for i8 {
VARTYPE = VT_I1;
n3, cVal, cVal_mut
from => {|n_ptr: &i8|Ok(*n_ptr)}
into => {|slf: i8| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for u16 {
VARTYPE = VT_UI2;
n3, uiVal, uiVal_mut
from => {|n_ptr: &u16|Ok(*n_ptr)}
into => {|slf: u16| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for u32 {
VARTYPE = VT_UI4;
n3, ulVal, ulVal_mut
from => {|n_ptr: &u32|Ok(*n_ptr)}
into => {|slf: u32| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for u64 {
VARTYPE = VT_UI8;
n3, ullVal, ullVal_mut
from => {|n_ptr: &u64|Ok(*n_ptr)}
into => {|slf: u64| -> Result<_, IntoVariantError> {Ok(slf)}}
}
}
variant_impl!{
impl VariantExt for Int {
VARTYPE = VT_INT;
n3, intVal, intVal_mut
from => {|n_ptr: &i32| Ok(Int::from(*n_ptr))}
into => {|slf: Int| -> Result<_, IntoVariantError> {Ok(i32::from(slf))}}
}
}
variant_impl!{
impl VariantExt for UInt {
VARTYPE = VT_UINT;
n3, uintVal, uintVal_mut
from => {|n_ptr: &u32| Ok(UInt::from(*n_ptr))}
into => {|slf: UInt| -> Result<_, IntoVariantError> { Ok(u32::from(slf))}}
}
}
variant_impl!{
impl VariantExt for Box<DecWrapper> {
VARTYPE = VT_PDECIMAL;
n3, pdecVal, pdecVal_mut
from => {|n_ptr: &*mut DECIMAL|Ok(Box::new(DecWrapper::from(**n_ptr)))}
into => {|slf: Box<DecWrapper>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw( Box::new(DECIMAL::from(*slf))))
}}
}
}
variant_impl!{
impl VariantExt for Box<Decimal> {
VARTYPE = VT_PDECIMAL;
n3, pdecVal, pdecVal_mut
from => {|n_ptr: &*mut DECIMAL|Ok(Box::new(Decimal::from(DecWrapper::from(**n_ptr))))}
into => {|slf: Box<Decimal>| -> Result<_, IntoVariantError> {
let bptr = Box::new(DECIMAL::from(DecWrapper::from(*slf)));
Ok(Box::into_raw(bptr))
}}
}
}
variant_impl!{
impl VariantExt for Box<i8> {
VARTYPE = VT_PI1;
n3, pcVal, pcVal_mut
from => {|n_ptr: &*mut i8|Ok(Box::new(**n_ptr))}
into => {|slf: Box<i8>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<u16> {
VARTYPE = VT_PUI2;
n3, puiVal, puiVal_mut
from => {|n_ptr: &*mut u16|Ok(Box::new(**n_ptr))}
into => {|slf: Box<u16>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<u32> {
VARTYPE = VT_PUI4;
n3, pulVal, pulVal_mut
from => {|n_ptr: &*mut u32|Ok(Box::new(**n_ptr))}
into => {|slf: Box<u32>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<u64> {
VARTYPE = VT_PUI8;
n3, pullVal, pullVal_mut
from => {|n_ptr: &*mut u64|Ok(Box::new(**n_ptr))}
into => {|slf: Box<u64>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(slf))
}}
}
}
variant_impl!{
impl VariantExt for Box<Int> {
VARTYPE = VT_PINT;
n3, pintVal, pintVal_mut
from => {|n_ptr: &*mut i32| Ok(Box::new(Int::from(**n_ptr)))}
into => {|slf: Box<Int>|-> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new(i32::from(*slf))))
}}
}
}
variant_impl!{
impl VariantExt for Box<UInt> {
VARTYPE = VT_PUINT;
n3, puintVal, puintVal_mut
from => {|n_ptr: &*mut u32| Ok(Box::new(UInt::from(**n_ptr)))}
into => {|slf: Box<UInt>| -> Result<_, IntoVariantError> {
Ok(Box::into_raw(Box::new(u32::from(*slf))))
}}
}
}
variant_impl!{
impl VariantExt for DecWrapper {
VARTYPE = VT_DECIMAL;
n1, decVal, decVal_mut
from => {|n_ptr: &DECIMAL|Ok(DecWrapper::from(*n_ptr))}
into => {|slf: DecWrapper| -> Result<_, IntoVariantError> {
Ok(DECIMAL::from(slf))
}}
}
}
variant_impl!{
impl VariantExt for Decimal {
VARTYPE = VT_DECIMAL;
n1, decVal, decVal_mut
from => {|n_ptr: &DECIMAL| Ok(Decimal::from(DecWrapper::from(*n_ptr)))}
into => {|slf: Decimal| -> Result<_, IntoVariantError> {
Ok(DECIMAL::from(DecWrapper::from(slf)))
}}
}
}
/// Helper type for VT_EMPTY variants
#[derive(Clone, Copy, Debug)]
pub struct VtEmpty{}
/// Helper type for VT_NULL variants
#[derive(Clone, Copy, Debug)]
pub struct VtNull{}
impl VariantExt for VtEmpty {
const VARTYPE: u32 = VT_EMPTY;
fn into_variant(self) -> Result<Ptr<VARIANT>, IntoVariantError> {
let n3: VARIANT_n3 = unsafe {mem::zeroed()};
let mut n1: VARIANT_n1 = unsafe {mem::zeroed()};
let tv = __tagVARIANT { vt: <Self as VariantExt>::VARTYPE as u16,
wReserved1: 0,
wReserved2: 0,
wReserved3: 0,
n3: n3};
unsafe {
let n_ptr = n1.n2_mut();
*n_ptr = tv;
};
let var = Box::new(VARIANT{ n1: n1 });
Ok(Ptr::with_checked(Box::into_raw(var)).unwrap())
}
fn from_variant(var: Ptr<VARIANT>) -> Result<Self, FromVariantError> {
let _var_d = VariantDestructor::new(var.as_ptr());
Ok(VtEmpty{})
}
}
impl VariantExt for VtNull {
const VARTYPE: u32 = VT_NULL;
fn into_variant(self) -> Result<Ptr<VARIANT>, IntoVariantError> {
let n3: VARIANT_n3 = unsafe {mem::zeroed()};
let mut n1: VARIANT_n1 = unsafe {mem::zeroed()};
let tv = __tagVARIANT { vt: <Self as VariantExt>::VARTYPE as u16,
wReserved1: 0,
wReserved2: 0,
wReserved3: 0,
n3: n3};
unsafe {
let n_ptr = n1.n2_mut();
*n_ptr = tv;
};
let var = Box::new(VARIANT{ n1: n1 });
Ok(Ptr::with_checked(Box::into_raw(var)).unwrap())
}
fn from_variant(var: Ptr<VARIANT>) -> Result<Self, FromVariantError> {
let _var_d = VariantDestructor::new(var.as_ptr());
Ok(VtNull{})
}
}
#[cfg(test)]
mod test {
use super::*;
macro_rules! validate_variant {
($t:ident, $val:expr, $vt:expr) => {
let v = $val;
let var = match v.clone().into_variant() {
Ok(var) => var,
Err(_) => panic!("Error")
};
assert!(!var.as_ptr().is_null());
unsafe {
let pvar = var.as_ptr();
let n1 = (*pvar).n1;
let tv: &__tagVARIANT = n1.n2();
assert_eq!(tv.vt as u32, $vt);
};
let var = $t::from_variant(var);
assert_eq!(v, var.unwrap());
};
}
#[test]
fn test_i64() {
validate_variant!(i64, 1337i64, VT_I8);
}
#[test]
fn test_i32() {
validate_variant!(i32, 1337i32, VT_I4);
}
#[test]
fn test_u8() {
validate_variant!(u8, 137u8, VT_UI1);
}
#[test]
fn test_i16() {
validate_variant!(i16, 1337i16, VT_I2);
}
#[test]
fn test_f32() {
validate_variant!(f32, 1337.9f32, VT_R4);
}
#[test]
fn test_f64() {
validate_variant!(f64, 1337.9f64, VT_R8);
}
#[test]
fn test_bool_t() {
validate_variant!(bool, true, VT_BOOL);
}
#[test]
fn test_bool_f() {
validate_variant!(bool, false, VT_BOOL);
}
#[test]
fn test_scode() {
validate_variant!(SCode, SCode::from(137), VT_ERROR);
}
#[test]
fn test_cy() {
validate_variant!(Currency, Currency::from(137), VT_CY);
}
#[test]
fn test_date() {
validate_variant!(Date, Date::from(137.7), VT_DATE);
}
#[test]
fn test_str() {
validate_variant!(String, String::from("testing abc1267 ?Ťũřǐꝥꞔ"), VT_BSTR);
}
#[test]
fn test_box_u8() {
type Bu8 = Box<u8>;
validate_variant!(Bu8, Box::new(139), VT_PUI1);
}
#[test]
fn test_box_i16() {
type Bi16 = Box<i16>;
validate_variant!(Bi16, Box::new(139), VT_PI2);
}
#[test]
fn test_box_i32() {
type Bi32 = Box<i32>;
validate_variant!(Bi32, Box::new(139), VT_PI4);
}
#[test]
fn test_box_i64() {
type Bi64 = Box<i64>;
validate_variant!(Bi64, Box::new(139), VT_PI8);
}
#[test]
fn test_box_f32() {
type Bf32 = Box<f32>;
validate_variant!(Bf32, Box::new(1337.9f32), VT_PR4);
}
#[test]
fn test_box_f64() {
type Bf64 = Box<f64>;
validate_variant!(Bf64, Box::new(1337.9f64), VT_PR8);
}
#[test]
fn test_box_bool() {
type Bbool = Box<bool>;
validate_variant!(Bbool, Box::new(true), VT_PBOOL);
}
#[test]
fn test_box_scode() {
type BSCode = Box<SCode>;
validate_variant!(BSCode, Box::new(SCode::from(-50)), VT_PERROR);
}
#[test]
fn test_box_cy() {
type BCy = Box<Currency>;
validate_variant!(BCy, Box::new(Currency::from(137)), VT_PCY);
}
#[test]
fn test_box_date() {
type BDate = Box<Date>;
validate_variant!(BDate, Box::new(Date::from(-10.333f64)), VT_PDATE);
}
#[test]
fn test_box_str() {
type BStr = Box<String>;
validate_variant!(BStr, Box::new(String::from("testing abc1267 ?Ťũřǐꝥꞔ")), VT_PBSTR);
}
#[test]
fn test_variant() {
let v = Variant::new(1000u64);
let var = match v.into_variant() {
Ok(var) => var,
Err(_) => panic!("Error")
};
assert!(!var.as_ptr().is_null());
unsafe {
let pvar = var.as_ptr();
let n1 = (*pvar).n1;
let tv: &__tagVARIANT = n1.n2();
assert_eq!(tv.vt as u32, VT_VARIANT);
};
let var = Variant::<u64>::from_variant(var);
assert_eq!(v, var.unwrap());
}
//test SafeArray<T>
//Ptr<c_void>
#[test]
fn test_i8() {
validate_variant!(i8, -119i8, VT_I1);
}
#[test]
fn test_u16() {
validate_variant!(u16, 119u16, VT_UI2);
}
#[test]
fn test_u32() {
validate_variant!(u32, 11976u32, VT_UI4);
}
#[test]
fn test_u64() {
validate_variant!(u64, 11976u64, VT_UI8);
}
#[test]
fn test_box_i8() {
type Bi8 = Box<i8>;
validate_variant!(Bi8, Box::new(-119i8), VT_PI1);
}
#[test]
fn test_box_u16() {
type Bu16 = Box<u16>;
validate_variant!(Bu16, Box::new(119u16), VT_PUI2);
}
#[test]
fn test_box_u32() {
type Bu32 = Box<u32>;
validate_variant!(Bu32, Box::new(11976u32), VT_PUI4);
}
#[test]
fn test_box_u64() {
type Bu64 = Box<u64>;
validate_variant!(Bu64, Box::new(11976u64), VT_PUI8);
}
#[test]
fn test_send() {
fn assert_send<T: Send>() {}
assert_send::<Variant<i64>>();
}
#[test]
fn test_sync() {
fn assert_sync<T: Sync>() {}
assert_sync::<Variant<i64>>();
}
} | 31.08181 | 112 | 0.505334 |
56a2dc674fc17425537fe57430de3a6551f9f340 | 1,090 | use math::IntVector;
use sdl2::rect::Rect as SdlRect;
#[derive(Debug, Copy, PartialEq, Clone)]
pub struct IntRect {
pub xy: IntVector,
pub width: u32,
pub height: u32,
}
impl IntRect {
fn new(x: i32, y: i32, width: u32, height: u32) -> IntRect {
IntRect {
xy: IntVector::new(x, y),
width: width,
height: height,
}
}
pub fn x(&self) -> i32 {
self.xy.x
}
pub fn y(&self) -> i32 {
self.xy.y
}
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
}
impl From<SdlRect> for IntRect {
fn from(sdl_rect: SdlRect) -> IntRect {
IntRect::new(sdl_rect.x(),
sdl_rect.y(),
sdl_rect.width(),
sdl_rect.height())
}
}
impl From<IntRect> for SdlRect {
fn from(int_rect: IntRect) -> SdlRect {
SdlRect::new(int_rect.x(),
int_rect.y(),
int_rect.width(),
int_rect.height())
}
}
| 20.185185 | 64 | 0.481651 |
48ac4d85bf06ca4d69485dca49cdf6972f274d5b | 15,116 | //!
//! Debug helper methods for enum variants that are familiar from [`Option`] & [`Result`] such as [`Option::unwrap_or`] or [`Result::and_then`].
//! # Example
//! ```
//! #[derive(variantly::Variantly)]
//! enum Color {
//! RGB(u8, u8, u8),
//! HSV(u8, u8, u8),
//! Grey(u8),
//! FromOutOfSpace,
//! #[variantly(rename = "darkness")]
//! Black,
//! }
//!
//! fn example() {
//! let color = Color::HSV(123, 45, 67);
//!
//! // boolean helper method for determining variant:
//! assert!(color.is_hsv());
//! assert!(!color.is_rgb());
//!
//! // Get inner values:
//! let (h, s, v) = color.unwrap_hsv();
//! assert_eq!((h, s, v), (123, 45, 67));
//!
//! // Single values don't require tuple destructuring:
//! let color = Color::Grey(128);
//! let value = color.unwrap_grey();
//! assert_eq!(value, 128);
//!
//! // Alter inner value, only if hsv:
//! let color = Color::HSV(111, 22, 33);
//! let color = color.and_then_hsv(|(h, s, _)| (h, s, 100));
//! assert_eq!(color.unwrap_hsv(), (111, 22, 100));
//!
//! // Safely unwrap with a fallback:
//! let color = Color::RGB(255, 255, 0);
//! let (r, g, b) = color.unwrap_or_rgb((0, 0, 0));
//! assert_eq!((r, g, b), (255, 255, 0));
//! // Since color is of the HSV variant, the default is not used.
//!
//! // Safely unwrap using the fallback
//! let color = Color::FromOutOfSpace;
//! let (r, g, b) = color.unwrap_or_rgb((0, 0, 0));
//! assert_eq!((r, g, b), (0, 0, 0));
//!
//! // Convert into an Option
//! let color = Color::RGB(0, 255, 255);
//! let optional_rgb = color.rgb();
//! assert_eq!(Some((0, 255, 255)), optional_rgb);
//!
//! // Convert into a Result
//! let color = Color::RGB(255, 0, 255);
//! let result_rgb = color.rgb_or("Error: This is not an RGB variant!");
//! assert_eq!(Ok((255, 0, 255)), result_rgb);
//!
//! // Operations like this can also use their familiar `_else` versions:
//! let color = Color::FromOutOfSpace;
//! let result_rgb = color.rgb_or_else(|| Some("This is a computationally expensive error!"));
//! assert!(result_rgb.is_err());
//!
//! // The `#[variantly(rename = "darkness")]` attribute renames derived methods:
//! let color = Color::Black;
//! assert!(color.is_darkness())
//! }
//! ```
//! # Derived Methods
//! In the naming of all methods described here, replace the `{variant_name}` with the snake_case formatted name of the given variant.
//!
//! ## Option & Result Conversion
//! Use the below methods to convert the enum into either an option or result:
//!
//! ### `pub fn {variant_name}(self) -> Option(...)`
//! If the enum is of the given variant, returns a [`Some`] containing the inner variant value. Otherwise, return [`None`].
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::HSV(1,2,3);
//!
//! let option = color.hsv();
//! assert_eq!(Some((1, 2, 3)), option);
//!
//! let color = Color::FromOutOfSpace;
//! assert_eq!(None, color.rgb());
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn {variant_name}_or<E>(self, err: E) -> Result<(...), E>`
//! If the enum is of the given variant, returns a [`Result::Ok`] containing the inner value. Otherwise, return [`Result::Err`] containing `err`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::HSV(1,2,3);
//!
//! let result = color.hsv_or("Error: Not an HSV!");
//! assert_eq!(Ok((1, 2, 3)), result);
//!
//! let color = Color::FromOutOfSpace;
//! let result = color.hsv_or("Error: Not an HSV!");
//! assert_eq!(Err("Error: Not an HSV!"), result);
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn {variant_name}_or_else<E, F: FnOnce() -> E>(self, f: F) -> Result<(...), E>`
//! If the enum is of the given variant, returns a [`Result::Ok`] containing the inner variant value. Otherwise, calls `f` to calculate a [`Result::Err`].
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::HSV(1,2,3);
//!
//! let result = color.hsv_or_else(|| "This is an expensive error to create.");
//! assert_eq!(Ok((1, 2, 3)), result);
//!
//! let color = Color::FromOutOfSpace;
//! let result = color.hsv_or_else(|| "This is an expensive error to create.");
//! assert_eq!(Err("This is an expensive error to create."), result);
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ## Accessing Inner Values
//! Use the below methods to easily access the inner value of a given variant.
//!
//! ### `pub fn expect_{variant_name}(self, msg: &str) -> (...)`
//! Returns the contained value.
//!
//! #### Panics
//! Panics if the enum is not of the given variant with the custom message `msg`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::Grey(10);
//!
//! let (h, s, v) = color_a.expect_hsv("This should be an hsv");
//! assert_eq!((h, s, v), (1, 2, 3));
//!
//! let grey = color_b.expect_grey("This should be grey");
//! assert_eq!(grey, 10);
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn unwrap_{variant_name}(self) -> (...)`
//! Returns the contained value.
//!
//! #### Panics
//! Panics if the enum is not of the given variant.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::Grey(10);
//!
//! let (h, s, v) = color_a.unwrap_hsv();
//! assert_eq!((h, s, v), (1, 2, 3));
//!
//! let grey = color_b.unwrap_grey();
//! assert_eq!(grey, 10);
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn unwrap_or_{variant_name}(self, fallback: (...)) -> (...)`
//! Returns the contained value if the enum is of the given variant, otherwise returns the provided `fallback`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::Grey(10);
//!
//! let (h, s, v) = color_a.unwrap_or_hsv((4, 5, 6));
//! assert_eq!((h, s, v), (1, 2, 3));
//!
//! let color = color_b.unwrap_or_rgb((4, 5, 6));
//! assert_eq!(color, (4, 5, 6));
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn unwrap_or_else_{variant_name}<F: FnOnce() -> (...)>(self, f: F) -> (...)`
//! Returns the contained value if the enum is of the given variant, otherwise computes a fallback from `f`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::Grey(10);
//!
//! let (h, s, v) = color_a.unwrap_or_else_hsv(|| (4,5,6));
//! assert_eq!((h, s, v), (1, 2, 3));
//!
//! let (h, s, v) = color_b.unwrap_or_else_hsv(|| (4,5,6));
//! assert_eq!((h, s, v), (4, 5, 6));
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ## Testing Variant Type
//! Use the below methods to test whether a variant is of the given type.
//!
//! ### `pub fn is_{variant_name}(self) -> bool`
//! Returns `true` if the enum is of the given variant.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::FromOutOfSpace;
//! assert!(color.is_from_out_of_space());
//! ```
//!
//! *Note: Available for all variant types*
//!
//! ### `pub fn is_not_{variant_name}(self) -> bool`
//! Returns `true` if the enum is *not* of the given variant.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::HSV(1,2,3);
//! assert!(color.is_not_rgb());
//! ```
//!
//! *Note: Available for all variant types*
//!
//! ## Compare & Process Specific Variant
//! Use the below to process and compare a specific enum variant.
//!
//! ### `pub fn and_{variant_name}(self, enum_b: GivenEnum) -> GivenEnum`
//! Returns `enum_b` if both self and `enum_b` are of the given variant. Otherwise returns `self`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::HSV(4,5,6);
//! let and = color_a.and_hsv(color_b);
//! assert_eq!(
//! and,
//! Color::HSV(4,5,6),
//! );
//! ```
//!
//! *Available for all variant types*
//!
//! ### `pub fn and_then_{variant_name}<F: FnOnce((...)) -> (...)>(self, f: F) -> Self`
//! Returns the enum as is if it is not of the given variant, otherwise calls `f` with the wrapped value and returns the result.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//!
//! let and = color_a.and_then_hsv(|(h, s, _)| (h, s, 4));
//! assert_eq!(
//! and,
//! Color::HSV(1, 2, 4),
//! );
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! ### `pub fn or_{variant_name}(self, enum_b: GivenEnum) -> GivenEnum`
//! Returns `self` if it is of the given variant, otherwise returns `enum_b`.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color_a = Color::HSV(1,2,3);
//! let color_b = Color::RGB(4,5,6);
//! let or = color_a.or_rgb(color_b);
//! assert_eq!(
//! or,
//! Color::RGB(4,5,6),
//! );
//! ```
//!
//! *Available for all variant types*
//!
//! ### `pub fn or_else_{variant_name}<F: FnOnce() -> (...)>(self, f: F) -> Self {`
//! Returns `self` if it is of the given variant, otherwise calls `f` and returns the result.
//!
//! #### Example
//! ```
//! # #[derive(variantly::Variantly, Debug, PartialEq)]
//! # enum Color {
//! # RGB(u8, u8, u8),
//! # HSV(u8, u8, u8),
//! # Grey(u8),
//! # FromOutOfSpace,
//! # #[variantly(rename = "darkness")]
//! # Black,
//! # }
//! let color = Color::HSV(1,2,3);
//! let color = color.or_else_rgb(|| (4,5,6));
//! assert_eq!(
//! color,
//! Color::RGB(4,5,6),
//! );
//! ```
//!
//! *Note: Available only for tuple-style variants such as Color::RGB(200, 40, 180), or Color::Grey(10)*
//!
//! # Renaming Methods
//! The `variantly` attribute may be placed on a variant in order to customize the resulting method names. The value set against `rename` inside the attribute will be used in place of the snake_cased variant name when constructing derived method names.
//! ```
//! #[derive(variantly::Variantly)]
//! enum SomeEnum {
//! #[variantly(rename = "variant_a")]
//! SomeVariantWithALongName(String),
//! VariantB,
//! }
//!
//! let variant = SomeEnum::SomeVariantWithALongName(String::from("Hello"));
//! assert!(variant.is_variant_a());
//! ```
//! Methods associated with `SomeVariantWithALongName` will now be accessible only with the `variant_a`
//! suffix, such as `.unwrap_or_else_variant_a()`. This can help control overly verbose fn names.
//! Note that the input to `rename` is used as is and is not coerced into snake_case.
//!
//! The above is also relevant when two variant names would expand to create conflicting method names:
//! ```
//! #[derive(variantly::Variantly)]
//! enum SomeEnum {
//! #[variantly(rename = "capital")]
//! ABC,
//! #[variantly(rename = "lower")]
//! abc,
//! }
//! ```
//! Without the `rename` attribute in the above, both variants would create conflicting methods such as `.is_abc()` due to the coercion to snake_case.
//! This is avoided by using the rename input to create meaningful and unique fn names.
//!
//! #### License
//!
//! <sup>
//! Licensed under <a href="LICENSE">MIT license</a>.
//! </sup>
//!
//! <br>
//!
//! <sub>
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this crate shall be licensed as above, without any additional terms or conditions.
//! </sub>
#[macro_use]
extern crate darling;
extern crate proc_macro;
#[macro_use]
mod idents;
mod derive;
mod error;
mod input;
use derive::derive_variantly_fns;
use proc_macro::TokenStream;
use syn::{parse_macro_input, ItemEnum};
/// The `Variantly` derive macro. See [the module level documentation](self) for more information.
#[proc_macro_derive(Variantly, attributes(variantly))]
pub fn variantly(input: TokenStream) -> TokenStream {
let item_enum = parse_macro_input!(input as ItemEnum);
derive_variantly_fns(item_enum).unwrap_or_else(|err| err.to_compile_error())
}
| 32.025424 | 252 | 0.572771 |
f795123053e8827826f963a6382dea9dafdaa30a | 3,894 | //! Source code example of how to create your own widget.
//! This is meant to be read as a tutorial, hence the plethora of comments.
/// iOS-style toggle switch:
///
/// ``` text
/// _____________
/// / /.....\
/// | |.......|
/// \_______\_____/
/// ```
pub fn toggle(ui: &mut egui::Ui, on: &mut bool) -> egui::Response {
// Widget code can be broken up in four steps:
// 1. Decide a size for the widget
// 2. Allocate space for it
// 3. Handle interactions with the widget (if any)
// 4. Paint the widget
// 1. Deciding widget size:
// You can query the `ui` how much space is available,
// but in this example we have a fixed size widget of the default size for a button:
let desired_size = ui.style().spacing.interact_size;
// 2. Allocating space:
// This is where we get a region of the screen assigned.
// We also tell the Ui to sense clicks in the allocated region.
let (rect, response) = ui.allocate_exact_size(desired_size, egui::Sense::click());
// 3. Interact: Time to check for clicks!.
if response.clicked {
*on = !*on;
}
// 4. Paint!
// First let's ask for a simple animation from Egui.
// Egui keeps track of changes in the boolean associated with the id and
// returns an animated value in the 0-1 range for how much "on" we are.
let how_on = ui.ctx().animate_bool(response.id, *on);
// We will follow the current style by asking
// "how should something that is being interacted with be painted?".
// This will, for instance, give us different colors when the widget is hovered or clicked.
let visuals = ui.style().interact(&response);
let off_bg_fill = egui::Rgba::TRANSPARENT;
let on_bg_fill = egui::Rgba::from_rgb(0.0, 0.5, 0.25);
let bg_fill = egui::lerp(off_bg_fill..=on_bg_fill, how_on);
// All coordinates are in absolute screen coordinates so we use `rect` to place the elements.
let radius = 0.5 * rect.height();
ui.painter().rect(rect, radius, bg_fill, visuals.bg_stroke);
// Paint the circle, animating it from left to right with `how_on`:
let circle_x = egui::lerp((rect.left() + radius)..=(rect.right() - radius), how_on);
let center = egui::pos2(circle_x, rect.center().y);
ui.painter()
.circle(center, 0.75 * radius, visuals.fg_fill, visuals.fg_stroke);
// All done! Return the interaction response so the user can check what happened
// (hovered, clicked, ...) and maybe show a tooltip:
response
}
/// Here is the same code again, but a bit more compact:
#[allow(dead_code)]
fn toggle_compact(ui: &mut egui::Ui, on: &mut bool) -> egui::Response {
let desired_size = ui.style().spacing.interact_size;
let (rect, response) = ui.allocate_exact_size(desired_size, egui::Sense::click());
*on ^= response.clicked; // toggle if clicked
let how_on = ui.ctx().animate_bool(response.id, *on);
let visuals = ui.style().interact(&response);
let off_bg_fill = egui::Rgba::TRANSPARENT;
let on_bg_fill = egui::Rgba::from_rgb(0.0, 0.5, 0.25);
let bg_fill = egui::lerp(off_bg_fill..=on_bg_fill, how_on);
let radius = 0.5 * rect.height();
ui.painter().rect(rect, radius, bg_fill, visuals.bg_stroke);
let circle_x = egui::lerp((rect.left() + radius)..=(rect.right() - radius), how_on);
let center = egui::pos2(circle_x, rect.center().y);
ui.painter()
.circle(center, 0.75 * radius, visuals.fg_fill, visuals.fg_stroke);
response
}
pub fn demo(ui: &mut egui::Ui, on: &mut bool) {
ui.horizontal_wrapped_for_text(egui::TextStyle::Button, |ui| {
ui.label("It's easy to create your own widgets!");
ui.label("This toggle switch is just one function and 15 lines of code:");
toggle(ui, on).on_hover_text("Click to toggle");
ui.add(crate::__egui_github_link_file!());
});
}
| 43.266667 | 97 | 0.650231 |
fe54601d0645faeda659e6e59b52719a948ddfe4 | 5,844 | use super::super::iam;
use super::super::ratelimiting;
use rocket::http::Status as HttpStatus;
use rocket::http::{Cookies, Method};
use rocket::request::{self, FromRequest, Request};
use rocket::response::status::Custom;
use rocket::response::{self, Responder};
use rocket::{self, Config, Outcome};
use rocket_contrib::databases::diesel::PgConnection;
use rocket_contrib::databases::redis::Connection as RedisConnection;
use rocket_contrib::json::JsonValue;
use rocket_cors::{AllowedHeaders, AllowedOrigins, Cors, CorsOptions};
use uuid::Uuid;
#[database("digester")]
pub struct DigesterDbConn(pub PgConnection);
#[database("redis")]
pub struct Redis(pub RedisConnection);
pub enum JsonResponse {
Ok(JsonValue),
BadRequest(String),
InternalServerError,
NotFound,
Forbidden,
Unauthorized,
TooManyRequests,
UnprocessableEntity,
}
impl<'r> Responder<'r> for JsonResponse {
fn respond_to(self, req: &Request) -> response::Result<'r> {
let (body, status) = match self {
JsonResponse::Ok(body) => (body, HttpStatus::Ok),
JsonResponse::BadRequest(error) => (json!({ "error": error }), HttpStatus::BadRequest),
JsonResponse::InternalServerError => (json!({}), HttpStatus::InternalServerError),
JsonResponse::NotFound => (json!({}), HttpStatus::NotFound),
JsonResponse::Unauthorized => (json!({}), HttpStatus::Unauthorized),
JsonResponse::Forbidden => (json!({}), HttpStatus::Forbidden),
JsonResponse::TooManyRequests => (json!({}), HttpStatus::TooManyRequests),
JsonResponse::UnprocessableEntity => (json!({}), HttpStatus::UnprocessableEntity),
};
Custom(status, body).respond_to(req)
}
}
#[derive(Clone)]
pub struct Protected(pub iam::Session);
static UNAUTHORIZED: request::Outcome<Protected, ()> =
Outcome::Failure((HttpStatus::Unauthorized, ()));
static INTERNAL_SERVER_ERROR: request::Outcome<Protected, ()> =
Outcome::Failure((HttpStatus::InternalServerError, ()));
// __Host- prefix: see cookie prefix: https://www.sjoerdlangkemper.nl/2017/02/09/cookie-prefixes/
pub static SESSION_ID: &str = "__Host-SessionId";
impl<'a, 'r> FromRequest<'a, 'r> for Protected {
type Error = ();
fn from_request(req: &'a Request<'r>) -> request::Outcome<Protected, ()> {
let cookies = match Cookies::from_request(req) {
Outcome::Success(cookies) => cookies,
other => {
eprintln!("Failed to get cookies from request: {:?}", other);
return INTERNAL_SERVER_ERROR.clone();
}
};
let session_id = match cookies.get(SESSION_ID) {
None => {
eprintln!("No session id sent. Returning unauthorized");
return UNAUTHORIZED.clone();
}
Some(cookie) => match Uuid::parse_str(cookie.value()) {
Ok(session_id) => session_id,
Err(_) => {
eprintln!("Failed to parse session id. Returning unauthorized");
return UNAUTHORIZED.clone();
}
},
};
let redis = match Redis::from_request(req) {
Outcome::Success(redis) => redis,
other => {
eprintln!("Failed to get redis from request: {:?}", other);
return INTERNAL_SERVER_ERROR.clone();
}
};
match iam::fetch_session(&redis, session_id) {
Ok(Some(session)) => Outcome::Success(Protected(session)),
Ok(None) => {
eprintln!("Session id not found in iam. Returning unauthorized");
UNAUTHORIZED.clone()
}
Err(err) => {
eprintln!("Failed to fetch session: {:?}", err);
INTERNAL_SERVER_ERROR.clone()
}
}
}
}
pub struct RateLimited {}
impl<'a, 'r> FromRequest<'a, 'r> for RateLimited {
type Error = ();
fn from_request(req: &'a Request<'r>) -> request::Outcome<RateLimited, ()> {
let mut redis = match Redis::from_request(req) {
Outcome::Success(redis) => redis,
other => {
eprintln!("Failed to get redis from request: {:?}", other);
return Outcome::Failure((HttpStatus::InternalServerError, ()));
}
};
let ip = req
.headers()
.get_one("X-Forwarded-For")
.unwrap_or("[no-ip]");
use ratelimiting::RateLimitError::*;
match ratelimiting::rate_limit(&mut redis, ip) {
Ok(()) => Outcome::Success(RateLimited {}),
Err(TooManyRequests) => Outcome::Failure((HttpStatus::TooManyRequests, ())),
Err(Unknown(err)) => {
eprintln!("Failed to rate limit ip {}: {:?}", ip, err);
Outcome::Failure((HttpStatus::InternalServerError, ()))
}
}
}
}
pub fn cors_fairing(config: &Config) -> Result<Cors, String> {
// todo properly implement CORS, this only works development
let allowed_origin: &str = config
.get_table("cors")
.expect("Missing config entry cors")
.get("allowed_origin")
.expect("Missing config entry 'cors.allowed_origin'")
.as_str()
.expect("Missing config entry cors.allowed_origin");
let allowed_origins = AllowedOrigins::some_exact(&[allowed_origin]);
CorsOptions {
allowed_origins,
allowed_methods: vec![Method::Get, Method::Post, Method::Put, Method::Delete]
.into_iter()
.map(From::from)
.collect(),
allowed_headers: AllowedHeaders::all(),
allow_credentials: true,
..Default::default()
}
.to_cors()
.map_err(|err| format!("Failed to setup CORS: {:?}", err))
}
| 36.525 | 99 | 0.588296 |
64bfe8f7f9ecc8b1d215a864cdbcf071cc990f84 | 965 | #![feature(proc_macro_hygiene)]
#[macro_use] extern crate rocket;
use std::path::{Path, PathBuf};
use rocket::http::ext::Normalize;
use rocket::Route;
#[get("/<path..>")]
fn files(route: &Route, path: PathBuf) -> String {
Path::new(route.base()).join(path).normalized_str().to_string()
}
mod route_guard_tests {
use super::*;
use rocket::local::Client;
fn assert_path(client: &Client, path: &str) {
let mut res = client.get(path).dispatch();
assert_eq!(res.body_string(), Some(path.into()));
}
#[test]
fn check_mount_path() {
let rocket = rocket::ignite()
.mount("/first", routes![files])
.mount("/second", routes![files]);
let client = Client::new(rocket).unwrap();
assert_path(&client, "/first/some/path");
assert_path(&client, "/second/some/path");
assert_path(&client, "/first/second/b/c");
assert_path(&client, "/second/a/b/c");
}
}
| 26.805556 | 67 | 0.597927 |
6731221f4c7f786316747dc82bcdc56bdce5b283 | 3,857 | use crate::{args, CoreOp, ICoreOp, IOp, ISymbol, Logic, QualIdentifier, Term, Void};
pub use amzn_smt_ir_derive::Operation;
use std::fmt;
/// An `Operation` is a type that can be used as the operation of a [`Logic`] i.e. `T::Op` where
/// `T: Logic`. This should usually not be implemented manually; instead, use the derive macro
/// exported along with this trait.
///
/// Operator symbols can be specified with the `#[symbol]` attribute. For instance, to specify a
/// variant should be parsed from the `+` symbol, add `#[symbol("+")]`.
///
/// **Note:** `Operation` can only be derived for `enum`s with a single type parameter corresponding
/// to a term.
///
/// # Examples
///
/// ## Deriving `Operation` for a simple enum.
///
/// ```
/// # fn main() {
/// use amzn_smt_ir::{fold::Fold, visit::Visit, Term, Operation, Logic, Void, QualIdentifier, ISort, UnknownSort, Sorted, Ctx};
/// use smt2parser::concrete::Constant;
///
/// #[derive(Operation, Fold, Visit, Clone, Hash, PartialEq, Eq)]
/// enum Math<Term> {
/// #[symbol("+")]
/// Add(Vec<Term>),
/// #[symbol("-")]
/// Neg(Term),
/// #[symbol("-")]
/// Subtract(Term, Term),
/// }
///
/// impl<L: Logic> Sorted<L> for Math<Term<L>> {
/// fn sort(&self, _: &mut Ctx) -> Result<ISort, UnknownSort<Term<L>>> {
/// Ok(ISort::int())
/// }
/// }
///
/// #[derive(Clone, Debug, Default, Hash, PartialEq, Eq)]
/// struct L;
/// impl Logic for L {
/// type Var = QualIdentifier;
/// type Op = Math<Term<Self>>;
/// type Quantifier = Void;
/// type UninterpretedFunc = Void;
/// }
/// type Op = Math::<Term<L>>;
///
/// let x = || Constant::Numeral(0u8.into()).into();
/// let t = || Term::Constant(x());
/// let func = QualIdentifier::from("-");
/// assert!(matches!(Op::parse(func.clone(), vec![]), Err(_)));
/// assert_eq!(Op::parse(func.clone(), vec![t()]), Ok(Math::Neg(Term::Constant(x()))));
/// assert_eq!(
/// Op::parse(func.clone(), vec![t(), t()]),
/// Ok(Math::Subtract(Term::Constant(x()), Term::Constant(x())))
/// );
/// assert!(matches!(Op::parse(func, vec![t(), t(), t()]), Err(_)));
/// # }
/// ```
pub trait Operation<T: Logic>: Sized {
/// Parses an operation from a function identifier and list of arguments.
fn parse(func: QualIdentifier, args: Vec<Term<T>>) -> Result<Self, InvalidOp<T>>;
/// Produces the function symbol of the application.
fn func(&self) -> ISymbol;
}
#[derive(Debug, Hash, PartialEq, Eq)]
pub struct InvalidOp<T: Logic> {
pub func: QualIdentifier,
pub args: Vec<Term<T>>,
}
impl<T: Logic> fmt::Display for InvalidOp<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"Invalid function application in logic {:?}: ({} ",
T::default(),
self.func
)?;
args::Format::fmt(&self.args, f, fmt::Display::fmt)?;
write!(f, ")")
}
}
impl<T: Logic> std::error::Error for InvalidOp<T> where Self: fmt::Debug + fmt::Display {}
impl<L: Logic> Operation<L> for ICoreOp<L> {
fn parse(func: QualIdentifier, args: Vec<Term<L>>) -> Result<Self, InvalidOp<L>> {
CoreOp::parse(func, args).map(Into::into)
}
fn func(&self) -> ISymbol {
self.as_ref().func()
}
}
impl<L: Logic> Operation<L> for IOp<L> {
fn parse(func: QualIdentifier, args: Vec<Term<L>>) -> Result<Self, InvalidOp<L>> {
L::Op::parse(func, args).map(Into::into)
}
fn func(&self) -> ISymbol {
self.as_ref().func()
}
}
impl<T: Logic> Operation<T> for Void {
fn parse(func: QualIdentifier, args: Vec<Term<T>>) -> Result<Self, InvalidOp<T>> {
Err(InvalidOp { func, args })
}
fn func(&self) -> ISymbol {
match *self {}
}
}
impl<L: Logic> From<Void> for IOp<L> {
fn from(x: Void) -> Self {
match x {}
}
}
| 30.611111 | 127 | 0.57454 |
334c7f4c73869c25a4b15fcb80aca2c5f59a9268 | 1,498 | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::error;
use std::result;
quick_error! {
#[derive(Debug)]
pub enum Error {
Io(err: std::io::Error) {
from()
cause(err)
description(err.description())
}
ClusterBootstrapped(cluster_id: u64) {
description("cluster bootstrap error")
display("cluster {} is already bootstrapped", cluster_id)
}
ClusterNotBootstrapped(cluster_id: u64) {
description("cluster not bootstrap error")
display("cluster {} is not bootstrapped", cluster_id)
}
Incompatible {
description("compatible error")
display("feature is not supported in other cluster components")
}
Grpc(err: grpcio::Error) {
from()
cause(err)
description(err.description())
}
Other(err: Box<dyn error::Error + Sync + Send>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
RegionNotFound(key: Vec<u8>) {
description("region is not found")
display("region is not found for key {:?}", key)
}
StoreTombstone(msg: String) {
description("store is tombstone")
display("store is tombstone {:?}", msg)
}
}
}
pub type Result<T> = result::Result<T, Error>;
| 30.571429 | 75 | 0.542056 |
019fe66a7e72da0eda0894be74f8c9a2be2135af | 2,429 | // Copyright 2018 The proptest developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_use]
extern crate proptest_derive;
#[derive(Debug, Arbitrary)] //~ ERROR: 2 errors
//~| [proptest_derive, E0028]
//~| [proptest_derive, E0006]
enum NonFatal {
#[proptest(skip, filter(foo))]
V1(u8),
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T0 {
#[proptest(skip, filter(foo))]
V1(u8),
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T1 {
#[proptest(
skip,
filter(foo)
)]
V1 {
field: u8
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T2 {
#[proptest(skip)]
V1(
#[proptest(filter(foo))]
u8
),
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T3 {
#[proptest(skip)]
V1 {
#[proptest(filter(foo))]
field: u8
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T4 {
#[proptest(skip, filter(foo))]
V1(u8),
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T5 {
#[proptest(skip, filter(foo))]
V1 {
field: u8
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T6 {
#[proptest(skip)]
V1(
#[proptest(filter(foo))]
u8
),
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T7 {
#[proptest(skip)]
V1 {
#[proptest(filter(foo))]
field: usize
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T8 {
#[proptest(skip)]
V1 {
#[proptest(filter(foo))]
field: usize
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T9 {
#[proptest(skip)]
V1 {
#[proptest(filter(foo))]
field: usize
},
V2,
}
#[derive(Debug, Arbitrary)] //~ ERROR: [proptest_derive, E0028]
enum T10 {
#[proptest(skip)]
V1 {
#[proptest(filter(foo))]
field: usize
},
V2,
}
| 19.58871 | 68 | 0.561136 |
c1d30639e3535c36594e1b6e79b5220248984dd7 | 4,843 | fn fft_pattern<T>(base: &[T], digit: usize) -> impl Iterator<Item = &T> {
base.iter()
.flat_map(move |d| std::iter::repeat(d).take(digit + 1))
.cycle()
.skip(1)
}
pub fn fft(input: &[i32], pattern: &[i32]) -> Vec<i32> {
(0..input.len())
.map(|digit| {
input
.iter()
.zip(fft_pattern(pattern, digit))
.map(|(x, y)| x * y)
.sum::<i32>()
.abs()
% 10
})
.collect()
}
/// Algorithm from [u/paul2718](https://www.reddit.com/r/adventofcode/comments/ebf5cy/2019_day_16_part_2_understanding_how_to_come_up/fb4bvw4/).
pub fn decode(input: &[i32]) -> i32 {
const REPS: usize = 10_000;
const PHASES: usize = 100;
let start = input[0..7].iter().fold(0, |offset, d| offset * 10 + d) as usize;
let end = input.len() * REPS;
assert!(start > end / 2);
assert!(start < end);
let mut data = Vec::with_capacity(end - start);
for i in start..end {
data.push(input[i % input.len()]);
}
for _ in 0..PHASES {
for idx in (0..data.len() - 1).rev() {
data[idx] = (data[idx] + data[idx + 1]) % 10;
}
}
data[0..8].iter().fold(0, |offset, d| offset * 10 + d)
}
#[cfg(test)]
mod tests {
use super::*;
const BASE_PATTERN: [i32; 4] = [0, 1, 0, -1];
#[test]
fn fft_pattern_first_digit() {
assert_eq!(
fft_pattern(&[1, 2, 3], 0)
.take(5)
.copied()
.collect::<Vec<_>>(),
vec![2, 3, 1, 2, 3]
);
}
#[test]
fn fft_pattern_second_digit() {
assert_eq!(
fft_pattern(&[1, 2, 3], 1)
.take(5)
.copied()
.collect::<Vec<_>>(),
vec![1, 2, 2, 3, 3]
);
}
#[test]
fn base_fft_pattern() {
assert_eq!(
fft_pattern(&BASE_PATTERN, 7)
.take(40)
.copied()
.collect::<Vec<_>>(),
vec![
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1,
-1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 1
]
);
}
#[test]
fn example_1() {
let input = [1, 2, 3, 4, 5, 6, 7, 8];
let phase_1 = fft(&input, &BASE_PATTERN);
assert_eq!(phase_1, vec![4, 8, 2, 2, 6, 1, 5, 8]);
let phase_2 = fft(&phase_1, &BASE_PATTERN);
assert_eq!(phase_2, vec![3, 4, 0, 4, 0, 4, 3, 8]);
let phase_3 = fft(&phase_2, &BASE_PATTERN);
assert_eq!(phase_3, vec![0, 3, 4, 1, 5, 5, 1, 8]);
let phase_4 = fft(&phase_3, &BASE_PATTERN);
assert_eq!(phase_4, vec![0, 1, 0, 2, 9, 4, 9, 8]);
}
fn parse_input(data: &str) -> Vec<i32> {
data.chars()
.map(|c| c.to_digit(10).unwrap() as i32)
.collect()
}
#[test]
fn example_2() {
let mut input = parse_input("80871224585914546619083218645595");
for _ in 0..100 {
input = fft(&input, &BASE_PATTERN);
}
input.truncate(8);
assert_eq!(input, parse_input("24176176"));
}
#[test]
fn example_3() {
let mut input = parse_input("19617804207202209144916044189917");
for _ in 0..100 {
input = fft(&input, &BASE_PATTERN);
}
input.truncate(8);
assert_eq!(input, parse_input("73745418"));
}
#[test]
fn example_4() {
let mut input = parse_input("69317163492948606335995924319873");
for _ in 0..100 {
input = fft(&input, &BASE_PATTERN);
}
input.truncate(8);
assert_eq!(input, parse_input("52432133"));
}
#[test]
fn day_16_part_1() {
let mut input = parse_input(include_str!("input").lines().take(1).next().unwrap());
for _ in 0..100 {
input = fft(&input, &BASE_PATTERN);
}
input.truncate(8);
assert_eq!(input, parse_input("68317988"));
}
#[test]
fn example_5() {
let input = parse_input("03036732577212944063491565474664");
let output = decode(&input);
assert_eq!(output, 84_462_026);
}
#[test]
fn example_6() {
let input = parse_input("02935109699940807407585447034323");
let output = decode(&input);
assert_eq!(output, 78_725_270);
}
#[test]
fn example_7() {
let input = parse_input("03081770884921959731165446850517");
let output = decode(&input);
assert_eq!(output, 53_553_731);
}
#[test]
fn day_16_part_2() {
let input = parse_input(include_str!("input").lines().take(1).next().unwrap());
let output = decode(&input);
assert_eq!(output, 53_850_800);
}
}
| 27.99422 | 144 | 0.490605 |
567afb0d79360f453ddfc679d2c1fcd9e62f8071 | 11,731 | use super::Optimizer;
use crate::{mode::Mode, util::ExprOptExt};
use std::mem::take;
use swc_common::{util::take::Take, EqIgnoreSpan, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_utils::{ident::IdentLike, prepend, ExprExt, StmtExt, Type, Value::Known};
use swc_ecma_visit::{noop_visit_type, Node, Visit, VisitWith};
/// Methods related to option `switches`.
impl<M> Optimizer<'_, M>
where
M: Mode,
{
/// Handle switches in the case where we can know which branch will be
/// taken.
pub(super) fn optimize_const_switches(&mut self, s: &mut Stmt) {
if !self.options.switches || self.ctx.stmt_lablled {
return;
}
let (label, stmt) = match s {
Stmt::Switch(s) => (None, s),
Stmt::Labeled(l) => match &mut *l.body {
Stmt::Switch(s) => (Some(l.label.clone()), s),
_ => return,
},
_ => return,
};
let discriminant = &mut stmt.discriminant;
match &**discriminant {
Expr::Update(..) => return,
_ => {}
}
let matching_case = stmt.cases.iter_mut().position(|case| {
case.test
.as_ref()
.map(|test| discriminant.value_mut().eq_ignore_span(&test))
.unwrap_or(false)
});
if let Some(case_idx) = matching_case {
let mut var_ids = vec![];
let mut stmts = vec![];
let should_preserve_switch = stmt.cases.iter().skip(case_idx).any(|case| {
let mut v = BreakFinder {
found_unlabelled_break_for_stmt: false,
};
case.visit_with(&Invalid { span: DUMMY_SP }, &mut v);
v.found_unlabelled_break_for_stmt
});
if should_preserve_switch {
// Prevent infinite loop.
if stmt.cases.len() == 1 {
return;
}
log::debug!("switches: Removing unreachable cases from a constant switch");
} else {
log::debug!("switches: Removing a constant switch");
}
self.changed = true;
let mut preserved = vec![];
if !should_preserve_switch && !discriminant.is_lit() {
preserved.push(Stmt::Expr(ExprStmt {
span: stmt.span,
expr: discriminant.take(),
}));
if let Some(expr) = stmt.cases[case_idx].test.take() {
preserved.push(Stmt::Expr(ExprStmt {
span: stmt.cases[case_idx].span,
expr,
}));
}
}
for case in &stmt.cases[..case_idx] {
for cons in &case.cons {
var_ids.extend(
cons.extract_var_ids()
.into_iter()
.map(|name| VarDeclarator {
span: DUMMY_SP,
name: Pat::Ident(name.into()),
init: None,
definite: Default::default(),
}),
);
}
}
for case in stmt.cases.iter_mut().skip(case_idx) {
let mut found_break = false;
case.cons.retain(|stmt| match stmt {
Stmt::Break(BreakStmt { label: None, .. }) => {
found_break = true;
false
}
// TODO: Search recursively.
Stmt::Break(BreakStmt {
label: Some(break_label),
..
}) => {
if Some(break_label.to_id()) == label.as_ref().map(|label| label.to_id()) {
found_break = true;
false
} else {
!found_break
}
}
_ => !found_break,
});
stmts.append(&mut case.cons);
if found_break {
break;
}
}
if !var_ids.is_empty() {
prepend(
&mut stmts,
Stmt::Decl(Decl::Var(VarDecl {
span: DUMMY_SP,
kind: VarDeclKind::Var,
declare: Default::default(),
decls: take(&mut var_ids),
})),
)
}
let inner = if should_preserve_switch {
let mut cases = stmt.cases.take();
let case = SwitchCase {
span: cases[case_idx].span,
test: cases[case_idx].test.take(),
cons: stmts,
};
Stmt::Switch(SwitchStmt {
span: stmt.span,
discriminant: stmt.discriminant.take(),
cases: vec![case],
})
} else {
preserved.extend(stmts);
Stmt::Block(BlockStmt {
span: DUMMY_SP,
stmts: preserved,
})
};
*s = match label {
Some(label) => Stmt::Labeled(LabeledStmt {
span: DUMMY_SP,
label,
body: Box::new(inner),
}),
None => inner,
};
return;
}
}
/// Drops useless switch cases and statements in it.
///
/// This method will
///
/// - drop the empty cases at the end.
pub(super) fn optimize_switch_cases(&mut self, cases: &mut Vec<SwitchCase>) {
if !self.options.switches {
return;
}
// If default is not last, we can't remove empty cases.
let has_default = cases.iter().any(|case| case.test.is_none());
let all_ends_with_break = cases
.iter()
.all(|case| case.cons.is_empty() || case.cons.last().unwrap().is_break_stmt());
let mut preserve_cases = false;
if !all_ends_with_break && has_default {
if let Some(last) = cases.last() {
if last.test.is_some() {
preserve_cases = true;
}
}
}
self.merge_cases_with_same_cons(cases);
let last_non_empty = cases.iter().rposition(|case| {
// We should preserve test cases if the test is not a literal.
match case.test.as_deref() {
Some(Expr::Lit(..)) | None => {}
_ => return true,
}
if case.cons.is_empty() {
return false;
}
if case.cons.len() == 1 {
match case.cons[0] {
Stmt::Break(BreakStmt { label: None, .. }) => return false,
_ => {}
}
}
true
});
if !preserve_cases {
if let Some(last_non_empty) = last_non_empty {
if last_non_empty + 1 != cases.len() {
log::debug!("switches: Removing empty cases at the end");
self.changed = true;
cases.drain(last_non_empty + 1..);
}
}
}
if let Some(last) = cases.last_mut() {
match last.cons.last() {
Some(Stmt::Break(BreakStmt { label: None, .. })) => {
log::debug!("switches: Removing `break` at the end");
self.changed = true;
last.cons.pop();
}
_ => {}
}
}
}
/// If a case ends with break but content is same with the consequtive case
/// except the break statement, we merge them.
fn merge_cases_with_same_cons(&mut self, cases: &mut Vec<SwitchCase>) {
let stop_pos = cases.iter().position(|case| match case.test.as_deref() {
Some(Expr::Update(..)) => true,
_ => false,
});
let mut found = None;
'l: for (li, l) in cases.iter().enumerate().rev() {
if l.cons.is_empty() {
continue;
}
if let Some(stop_pos) = stop_pos {
if li > stop_pos {
continue;
}
}
if let Some(l_last) = l.cons.last() {
match l_last {
Stmt::Break(BreakStmt { label: None, .. }) => {}
_ => continue,
}
}
for r in cases.iter().skip(li + 1) {
if r.cons.is_empty() {
continue;
}
let mut r_cons_slice = r.cons.len();
if let Some(last) = r.cons.last() {
match last {
Stmt::Break(BreakStmt { label: None, .. }) => {
r_cons_slice -= 1;
}
_ => {}
}
}
if l.cons[..l.cons.len() - 1].eq_ignore_span(&r.cons[..r_cons_slice]) {
found = Some(li);
break 'l;
}
}
}
if let Some(idx) = found {
self.changed = true;
log::debug!("switches: Merging cases with same cons");
cases[idx].cons.clear();
}
}
/// Remove unreachable cases using discriminant.
pub(super) fn drop_unreachable_cases(&mut self, s: &mut SwitchStmt) {
if !self.options.switches {
return;
}
let dt = s.discriminant.get_type();
if let Known(Type::Bool) = dt {
let db = s.discriminant.as_pure_bool();
if let Known(db) = db {
s.cases.retain(|case| match case.test.as_deref() {
Some(test) => {
let tb = test.as_pure_bool();
match tb {
Known(tb) if db != tb => false,
_ => true,
}
}
None => false,
})
}
}
}
pub(super) fn optimize_switches(&mut self, _s: &mut Stmt) {
if !self.options.switches || self.ctx.stmt_lablled {
return;
}
//
}
}
#[derive(Default)]
struct BreakFinder {
found_unlabelled_break_for_stmt: bool,
}
impl Visit for BreakFinder {
noop_visit_type!();
fn visit_break_stmt(&mut self, s: &BreakStmt, _: &dyn Node) {
if s.label.is_none() {
self.found_unlabelled_break_for_stmt = true;
}
}
/// We don't care about breaks in a lop[
fn visit_for_stmt(&mut self, _: &ForStmt, _: &dyn Node) {}
/// We don't care about breaks in a lop[
fn visit_for_in_stmt(&mut self, _: &ForInStmt, _: &dyn Node) {}
/// We don't care about breaks in a lop[
fn visit_for_of_stmt(&mut self, _: &ForOfStmt, _: &dyn Node) {}
/// We don't care about breaks in a lop[
fn visit_do_while_stmt(&mut self, _: &DoWhileStmt, _: &dyn Node) {}
/// We don't care about breaks in a lop[
fn visit_while_stmt(&mut self, _: &WhileStmt, _: &dyn Node) {}
fn visit_function(&mut self, _: &Function, _: &dyn Node) {}
fn visit_arrow_expr(&mut self, _: &ArrowExpr, _: &dyn Node) {}
}
| 32.316804 | 99 | 0.435683 |
0a9352bbeafaec3ac8ebd4ba2e142b4621eaea28 | 6,522 | use amethyst::{
assets::{AssetStorage, Loader},
core::{cgmath::Vector3, transform::Transform},
ecs::prelude::World,
prelude::*,
renderer::{
Camera, MaterialTextureSet, PngFormat, Projection, SpriteRender, SpriteSheet,
SpriteSheetFormat, SpriteSheetHandle, Texture, TextureMetadata,
},
ui::{Anchor, TtfFormat, UiText, UiTransform},
};
use systems::ScoreText;
use {Ball, Paddle, Side};
use {ARENA_HEIGHT, ARENA_WIDTH};
pub struct Pong;
impl<'a, 'b> SimpleState<'a, 'b> for Pong {
fn on_start(&mut self, data: StateData<GameData>) {
let StateData { world, .. } = data;
use audio::initialise_audio;
// Load the spritesheet necessary to render the graphics.
// `spritesheet` is the layout of the sprites on the image;
// `texture` is the pixel data.
let sprite_sheet_handle = load_sprite_sheet(world);
// Setup our game.
initialise_paddles(world, sprite_sheet_handle.clone());
initialise_ball(world, sprite_sheet_handle);
initialise_camera(world);
initialise_audio(world);
initialise_score(world);
}
}
fn load_sprite_sheet(world: &mut World) -> SpriteSheetHandle {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `sprite_sheet` is the layout of the sprites on the image
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(
"texture/pong_spritesheet.png",
PngFormat,
TextureMetadata::srgb_scale(),
(),
&texture_storage,
)
};
// `texture_id` is a application defined ID given to the texture to store in the `World`.
// This is needed to link the texture to the sprite_sheet.
let texture_id = 0;
let mut material_texture_set = world.write_resource::<MaterialTextureSet>();
material_texture_set.insert(texture_id, texture_handle);
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
"texture/pong_spritesheet.ron", // Here we load the associated ron file
SpriteSheetFormat,
texture_id, // We pass it the ID of the texture we want it to use
(),
&sprite_sheet_store,
)
}
/// Initialise the camera.
fn initialise_camera(world: &mut World) {
let mut transform = Transform::default();
transform.translation.z = 1.0;
world
.create_entity()
.with(Camera::from(Projection::orthographic(
0.0,
ARENA_WIDTH,
ARENA_HEIGHT,
0.0,
))).with(transform)
.build();
}
/// Initialises one paddle on the left, and one paddle on the right.
fn initialise_paddles(world: &mut World, sprite_sheet_handle: SpriteSheetHandle) {
use {PADDLE_HEIGHT, PADDLE_VELOCITY, PADDLE_WIDTH};
let mut left_transform = Transform::default();
let mut right_transform = Transform::default();
// Correctly position the paddles.
let y = (ARENA_HEIGHT - PADDLE_HEIGHT) / 2.0;
left_transform.translation = Vector3::new(PADDLE_WIDTH * 0.5, y, 0.0);
right_transform.translation = Vector3::new(ARENA_WIDTH - PADDLE_WIDTH * 0.5, y, 0.0);
// Assign the sprites for the paddles
let sprite_render_left = SpriteRender {
sprite_sheet: sprite_sheet_handle.clone(),
sprite_number: 0, // paddle is the first sprite in the sprite_sheet
flip_horizontal: false,
flip_vertical: false,
};
let sprite_render_right = SpriteRender {
sprite_sheet: sprite_sheet_handle,
sprite_number: 0,
flip_horizontal: true,
flip_vertical: false,
};
// Create a left plank entity.
world
.create_entity()
.with(sprite_render_left)
.with(Paddle {
side: Side::Left,
width: PADDLE_WIDTH,
height: PADDLE_HEIGHT,
velocity: PADDLE_VELOCITY,
}).with(left_transform)
.build();
// Create right plank entity.
world
.create_entity()
.with(sprite_render_right)
.with(Paddle {
side: Side::Right,
width: PADDLE_WIDTH,
height: PADDLE_HEIGHT,
velocity: PADDLE_VELOCITY,
}).with(right_transform)
.build();
}
/// Initialises one ball in the middle-ish of the arena.
fn initialise_ball(world: &mut World, sprite_sheet_handle: SpriteSheetHandle) {
use {ARENA_HEIGHT, ARENA_WIDTH, BALL_RADIUS, BALL_VELOCITY_X, BALL_VELOCITY_Y};
// Create the translation.
let mut local_transform = Transform::default();
local_transform.translation = Vector3::new(ARENA_WIDTH / 2.0, ARENA_HEIGHT / 2.0, 0.0);
// Assign the sprite for the ball
let sprite_render = SpriteRender {
sprite_sheet: sprite_sheet_handle,
sprite_number: 1, // ball is the second sprite on the sprite_sheet
flip_horizontal: true,
flip_vertical: false,
};
world
.create_entity()
.with(sprite_render)
.with(Ball {
radius: BALL_RADIUS,
velocity: [BALL_VELOCITY_X, BALL_VELOCITY_Y],
}).with(local_transform)
.build();
}
fn initialise_score(world: &mut World) {
let font = world.read_resource::<Loader>().load(
"font/square.ttf",
TtfFormat,
Default::default(),
(),
&world.read_resource(),
);
let p1_transform = UiTransform::new(
"P1".to_string(),
Anchor::TopMiddle,
-50.,
-50.,
1.,
200.,
50.,
0,
);
let p2_transform = UiTransform::new(
"P2".to_string(),
Anchor::TopMiddle,
50.,
-50.,
1.,
200.,
50.,
0,
);
let p1_score = world
.create_entity()
.with(p1_transform)
.with(UiText::new(
font.clone(),
"0".to_string(),
[1.0, 1.0, 1.0, 1.0],
50.,
)).build();
let p2_score = world
.create_entity()
.with(p2_transform)
.with(UiText::new(
font,
"0".to_string(),
[1.0, 1.0, 1.0, 1.0],
50.,
)).build();
world.add_resource(ScoreText { p1_score, p2_score });
}
| 30.476636 | 93 | 0.607176 |
cc401a00cfd4a3ae371dad215d3da31061eb44c3 | 9,148 | #[doc = "Register `G2D_CLK` reader"]
pub struct R(crate::R<G2D_CLK_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<G2D_CLK_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<G2D_CLK_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<G2D_CLK_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `G2D_CLK` writer"]
pub struct W(crate::W<G2D_CLK_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<G2D_CLK_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<G2D_CLK_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<G2D_CLK_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Gating Clock\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CLK_GATING_A {
#[doc = "0: `0`"]
OFF = 0,
#[doc = "1: `1`"]
ON = 1,
}
impl From<CLK_GATING_A> for bool {
#[inline(always)]
fn from(variant: CLK_GATING_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `CLK_GATING` reader - Gating Clock"]
pub struct CLK_GATING_R(crate::FieldReader<bool>);
impl CLK_GATING_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
CLK_GATING_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CLK_GATING_A {
match self.bits {
false => CLK_GATING_A::OFF,
true => CLK_GATING_A::ON,
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
**self == CLK_GATING_A::OFF
}
#[doc = "Checks if the value of the field is `ON`"]
#[inline(always)]
pub fn is_on(&self) -> bool {
**self == CLK_GATING_A::ON
}
}
impl core::ops::Deref for CLK_GATING_R {
type Target = crate::FieldReader<bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CLK_GATING` writer - Gating Clock"]
pub struct CLK_GATING_W<'a> {
w: &'a mut W,
}
impl<'a> CLK_GATING_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CLK_GATING_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "`0`"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(CLK_GATING_A::OFF)
}
#[doc = "`1`"]
#[inline(always)]
pub fn on(self) -> &'a mut W {
self.variant(CLK_GATING_A::ON)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(1 << 31)) | ((value as u32 & 1) << 31);
self.w
}
}
#[doc = "Clock Source Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum CLK_SRC_SEL_A {
#[doc = "0: `0`"]
PLL_PERI_2X = 0,
#[doc = "1: `1`"]
PLL_VIDEO0_4X = 1,
#[doc = "2: `10`"]
PLL_VIDEO1_4X = 2,
#[doc = "3: `11`"]
PLL_AUDIO1_DIV2 = 3,
}
impl From<CLK_SRC_SEL_A> for u8 {
#[inline(always)]
fn from(variant: CLK_SRC_SEL_A) -> Self {
variant as _
}
}
#[doc = "Field `CLK_SRC_SEL` reader - Clock Source Select"]
pub struct CLK_SRC_SEL_R(crate::FieldReader<u8>);
impl CLK_SRC_SEL_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
CLK_SRC_SEL_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<CLK_SRC_SEL_A> {
match self.bits {
0 => Some(CLK_SRC_SEL_A::PLL_PERI_2X),
1 => Some(CLK_SRC_SEL_A::PLL_VIDEO0_4X),
2 => Some(CLK_SRC_SEL_A::PLL_VIDEO1_4X),
3 => Some(CLK_SRC_SEL_A::PLL_AUDIO1_DIV2),
_ => None,
}
}
#[doc = "Checks if the value of the field is `PLL_PERI_2X`"]
#[inline(always)]
pub fn is_pll_peri_2x(&self) -> bool {
**self == CLK_SRC_SEL_A::PLL_PERI_2X
}
#[doc = "Checks if the value of the field is `PLL_VIDEO0_4X`"]
#[inline(always)]
pub fn is_pll_video0_4x(&self) -> bool {
**self == CLK_SRC_SEL_A::PLL_VIDEO0_4X
}
#[doc = "Checks if the value of the field is `PLL_VIDEO1_4X`"]
#[inline(always)]
pub fn is_pll_video1_4x(&self) -> bool {
**self == CLK_SRC_SEL_A::PLL_VIDEO1_4X
}
#[doc = "Checks if the value of the field is `PLL_AUDIO1_DIV2`"]
#[inline(always)]
pub fn is_pll_audio1_div2(&self) -> bool {
**self == CLK_SRC_SEL_A::PLL_AUDIO1_DIV2
}
}
impl core::ops::Deref for CLK_SRC_SEL_R {
type Target = crate::FieldReader<u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CLK_SRC_SEL` writer - Clock Source Select"]
pub struct CLK_SRC_SEL_W<'a> {
w: &'a mut W,
}
impl<'a> CLK_SRC_SEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CLK_SRC_SEL_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "`0`"]
#[inline(always)]
pub fn pll_peri_2x(self) -> &'a mut W {
self.variant(CLK_SRC_SEL_A::PLL_PERI_2X)
}
#[doc = "`1`"]
#[inline(always)]
pub fn pll_video0_4x(self) -> &'a mut W {
self.variant(CLK_SRC_SEL_A::PLL_VIDEO0_4X)
}
#[doc = "`10`"]
#[inline(always)]
pub fn pll_video1_4x(self) -> &'a mut W {
self.variant(CLK_SRC_SEL_A::PLL_VIDEO1_4X)
}
#[doc = "`11`"]
#[inline(always)]
pub fn pll_audio1_div2(self) -> &'a mut W {
self.variant(CLK_SRC_SEL_A::PLL_AUDIO1_DIV2)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(7 << 24)) | ((value as u32 & 7) << 24);
self.w
}
}
#[doc = "Field `FACTOR_M` reader - Factor M"]
pub struct FACTOR_M_R(crate::FieldReader<u8>);
impl FACTOR_M_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
FACTOR_M_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for FACTOR_M_R {
type Target = crate::FieldReader<u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `FACTOR_M` writer - Factor M"]
pub struct FACTOR_M_W<'a> {
w: &'a mut W,
}
impl<'a> FACTOR_M_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x1f) | (value as u32 & 0x1f);
self.w
}
}
impl R {
#[doc = "Bit 31 - Gating Clock"]
#[inline(always)]
pub fn clk_gating(&self) -> CLK_GATING_R {
CLK_GATING_R::new(((self.bits >> 31) & 1) != 0)
}
#[doc = "Bits 24:26 - Clock Source Select"]
#[inline(always)]
pub fn clk_src_sel(&self) -> CLK_SRC_SEL_R {
CLK_SRC_SEL_R::new(((self.bits >> 24) & 7) as u8)
}
#[doc = "Bits 0:4 - Factor M"]
#[inline(always)]
pub fn factor_m(&self) -> FACTOR_M_R {
FACTOR_M_R::new((self.bits & 0x1f) as u8)
}
}
impl W {
#[doc = "Bit 31 - Gating Clock"]
#[inline(always)]
pub fn clk_gating(&mut self) -> CLK_GATING_W {
CLK_GATING_W { w: self }
}
#[doc = "Bits 24:26 - Clock Source Select"]
#[inline(always)]
pub fn clk_src_sel(&mut self) -> CLK_SRC_SEL_W {
CLK_SRC_SEL_W { w: self }
}
#[doc = "Bits 0:4 - Factor M"]
#[inline(always)]
pub fn factor_m(&mut self) -> FACTOR_M_W {
FACTOR_M_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "G2D Clock Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [g2d_clk](index.html) module"]
pub struct G2D_CLK_SPEC;
impl crate::RegisterSpec for G2D_CLK_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [g2d_clk::R](R) reader structure"]
impl crate::Readable for G2D_CLK_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [g2d_clk::W](W) writer structure"]
impl crate::Writable for G2D_CLK_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets G2D_CLK to value 0"]
impl crate::Resettable for G2D_CLK_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.509677 | 406 | 0.575973 |
38108da29efc485715e722f807ecd91eb4d4e870 | 824 | use hhmmss::Hhmmss;
use log::{Level, LevelFilter, Metadata, Record};
use std::time::Instant;
struct Logger;
impl log::Log for Logger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= Level::Info
}
fn log(&self, record: &Record) {
let execution_starts_at: Option<Instant> = None;
let elapsed = Instant::now().duration_since(execution_starts_at.expect("Failed to fetch elapsed time"));
println!("{} {:?}", elapsed.hhmmss(), record.args());
}
fn flush(&self) {}
}
/// Initialize the Logger
pub fn init() {
let execution_starts_at = Some(Instant::now());
println!("Execution starts at {:?}", execution_starts_at);
log::set_logger(&Logger)
.map(|()| log::set_max_level(LevelFilter::Info))
.expect("Failed to log");
}
| 26.580645 | 112 | 0.625 |
1aad46ceb48831c51a07871e565ba6ce970f5e4f | 3,802 | use crate::{
cmd_build::do_exec_build_cmd,
cmd_dump_syntax::do_exec_dump_syntax_cmd,
cmd_help::string_is_help_flag,
parse_cmd,
util::{dyn_error::DynError, package_info::PackageInfo},
Cmd,
};
use std::{
collections::HashMap,
env::Args,
fmt::Debug,
io,
panic::{self, UnwindSafe},
};
use tinyjson::{JsonParser, JsonValue};
fn as_string(value: JsonValue) -> Option<String> {
match value {
JsonValue::String(value) => Some(value),
_ => None,
}
}
fn as_array(value: JsonValue) -> Option<Vec<JsonValue>> {
match value {
JsonValue::Array(vec) => Some(vec),
_ => None,
}
}
struct InputRow {
id: JsonValue,
args: Vec<String>,
}
impl InputRow {
fn from_json(value: JsonValue) -> Result<Self, &'static str> {
let value = match value {
JsonValue::Object(mut map) => {
let id = map.remove("id").ok_or("expected id")?;
let args = map
.remove("args")
.and_then(|args| as_array(args)?.into_iter().map(as_string).collect())
.ok_or("expected args: string[]")?;
InputRow { id, args }
}
_ => return Err("expected {id, args}"),
};
Ok(value)
}
}
struct Action<A> {
id: JsonValue,
cmd: Cmd,
args: A,
}
fn write_batch_help() {
let package_info = PackageInfo::from_env();
print!(
include_str!("cmd_batch_help.txt"),
command = package_info.name,
version = package_info.version,
);
}
fn parse_action(json: &str) -> Result<Action<impl Iterator<Item = String>>, DynError> {
let row: InputRow = match JsonParser::new(json.chars()).parse() {
Ok(row) => InputRow::from_json(row)?,
Err(err) => return Err(format!("JSON としてパースできません。({})", err).into()),
};
let id = row.id;
let mut args = row.args.into_iter();
let cmd_name = match args.next() {
Some(cmd) => cmd,
None => return Err("args は空にできません。".into()),
};
let cmd = match parse_cmd(&cmd_name) {
Some(cmd) => cmd,
None => return Err(format!("サブコマンド '{}' はありません。", cmd_name).into()),
};
if let Cmd::Batch | Cmd::Help | Cmd::Version = cmd {
return Err(format!("batch ではサブコマンド '{}' を使えません。", cmd_name).into());
}
Ok(Action { id, cmd, args })
}
fn exec_action(action: Action<impl Iterator<Item = String> + UnwindSafe>) {
let Action { id, cmd, args } = action;
let caught = panic::catch_unwind(|| match cmd {
Cmd::Build => do_exec_build_cmd(args),
Cmd::DumpSyntax => do_exec_dump_syntax_cmd(args),
Cmd::Batch | Cmd::Help | Cmd::Version => unreachable!(),
});
let result: Result<(), Box<dyn Debug + Send + 'static>> = match caught {
Ok(Ok(())) => Ok(()),
Ok(Err(err)) => Err(err.into_inner()),
Err(err) => Err(Box::new(err)),
};
if let Err(err) = result {
let output = {
let mut o = HashMap::new();
o.insert("id".to_string(), id);
o.insert("err".to_string(), JsonValue::String(format!("{:?}", err)));
JsonValue::Object(o)
};
println!("{}", output.stringify().unwrap());
}
}
fn exec_loop() -> Result<(), DynError> {
let mut line = String::new();
io::stdin().read_line(&mut line)?;
if line.trim().is_empty() {
return Ok(());
}
let action = parse_action(&line)?;
let ((), result) = rayon::join(move || exec_action(action), exec_loop);
result
}
pub(crate) fn exec_batch_cmd(mut args: Args, help: bool) -> Result<(), DynError> {
if help || args.any(|arg| string_is_help_flag(&arg)) {
write_batch_help();
return Ok(());
}
exec_loop()
}
| 26.402778 | 90 | 0.551815 |
9122c103dcd1323aff707a8b36b23ba2a5e7a320 | 11,294 | // TODO: used by grpc-rust, should move it into separate crate.
#![doc(hidden)]
use std::io::Write;
// TODO: should not use wire_format here
use protobuf::wire_format;
/// Field visibility.
pub enum Visibility {
Public,
Default,
}
pub struct CodeWriter<'a> {
writer: &'a mut (Write + 'a),
indent: String,
}
impl<'a> CodeWriter<'a> {
pub fn new(writer: &'a mut Write) -> CodeWriter<'a> {
CodeWriter {
writer: writer,
indent: "".to_string(),
}
}
pub fn write_line<S : AsRef<str>>(&mut self, line: S) {
(if line.as_ref().is_empty() {
self.writer.write_all("\n".as_bytes())
} else {
let s: String = [self.indent.as_ref(), line.as_ref(), "\n"].concat();
self.writer.write_all(s.as_bytes())
}).unwrap();
}
pub fn write_generated(&mut self) {
self.write_line("// This file is generated. Do not edit");
self.write_generated_common();
}
pub fn write_generated_by(&mut self, pkg: &str, version: &str) {
self.write_line(format!(
"// This file is generated by {pkg} {version}. Do not edit",
pkg = pkg,
version = version
));
self.write_generated_common();
}
fn write_generated_common(&mut self) {
// https://secure.phabricator.com/T784
self.write_line("// @generated");
self.write_line("");
self.comment("https://github.com/Manishearth/rust-clippy/issues/702");
self.write_line("#![allow(unknown_lints)]");
self.write_line("#![allow(clippy)]");
self.write_line("");
self.write_line("#![cfg_attr(rustfmt, rustfmt_skip)]");
self.write_line("");
self.write_line("#![allow(box_pointers)]");
self.write_line("#![allow(dead_code)]");
self.write_line("#![allow(missing_docs)]");
self.write_line("#![allow(non_camel_case_types)]");
self.write_line("#![allow(non_snake_case)]");
self.write_line("#![allow(non_upper_case_globals)]");
self.write_line("#![allow(trivial_casts)]");
self.write_line("#![allow(unsafe_code)]");
self.write_line("#![allow(unused_imports)]");
self.write_line("#![allow(unused_results)]");
}
pub fn todo(&mut self, message: &str) {
self.write_line(format!("panic!(\"TODO: {}\");", message));
}
pub fn unimplemented(&mut self) {
self.write_line(format!("unimplemented!();"));
}
pub fn indented<F>(&mut self, cb: F)
where
F : Fn(&mut CodeWriter),
{
cb(&mut CodeWriter {
writer: self.writer,
indent: format!("{} ", self.indent),
});
}
#[allow(dead_code)]
pub fn commented<F>(&mut self, cb: F)
where
F : Fn(&mut CodeWriter),
{
cb(&mut CodeWriter {
writer: self.writer,
indent: format!("// {}", self.indent),
});
}
pub fn pub_const(&mut self, name: &str, field_type: &str, init: &str) {
self.write_line(&format!("pub const {}: {} = {};", name, field_type, init));
}
pub fn lazy_static(&mut self, name: &str, ty: &str) {
self.stmt_block(
&format!(
"static mut {}: ::protobuf::lazy::Lazy<{}> = ::protobuf::lazy::Lazy",
name,
ty
),
|w| {
w.field_entry("lock", "::protobuf::lazy::ONCE_INIT");
w.field_entry("ptr", &format!("0 as *const {}", ty));
},
);
}
pub fn lazy_static_decl_get<F>(&mut self, name: &str, ty: &str, init: F)
where
F : Fn(&mut CodeWriter),
{
self.lazy_static(name, ty);
self.unsafe_expr(|w| {
w.write_line(&format!("{}.get(|| {{", name));
w.indented(|w| init(w));
w.write_line(&format!("}})"));
});
}
pub fn lazy_static_decl_get_simple(&mut self, name: &str, ty: &str, init: &str) {
self.lazy_static(name, ty);
self.unsafe_expr(|w| { w.write_line(&format!("{}.get({})", name, init)); });
}
pub fn block<F>(&mut self, first_line: &str, last_line: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.write_line(first_line);
self.indented(cb);
self.write_line(last_line);
}
pub fn expr_block<F>(&mut self, prefix: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.block(&format!("{} {{", prefix), "}", cb);
}
pub fn stmt_block<S : AsRef<str>, F>(&mut self, prefix: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.block(&format!("{} {{", prefix.as_ref()), "};", cb);
}
pub fn unsafe_expr<F>(&mut self, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block("unsafe", cb);
}
pub fn impl_self_block<S : AsRef<str>, F>(&mut self, name: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("impl {}", name.as_ref()), cb);
}
pub fn impl_for_block<S1 : AsRef<str>, S2 : AsRef<str>, F>(&mut self, tr: S1, ty: S2, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("impl {} for {}", tr.as_ref(), ty.as_ref()), cb);
}
pub fn unsafe_impl(&mut self, what: &str, for_what: &str) {
self.write_line(&format!("unsafe impl {} for {} {{}}", what, for_what));
}
pub fn pub_struct<S : AsRef<str>, F>(&mut self, name: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("pub struct {}", name.as_ref()), cb);
}
pub fn def_struct<S : AsRef<str>, F>(&mut self, name: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("struct {}", name.as_ref()), cb);
}
pub fn pub_enum<F>(&mut self, name: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("pub enum {}", name), cb);
}
pub fn pub_trait<F>(&mut self, name: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("pub trait {}", name), cb);
}
pub fn pub_trait_extend<F>(&mut self, name: &str, extend: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("pub trait {} : {}", name, extend), cb);
}
pub fn field_entry(&mut self, name: &str, value: &str) {
self.write_line(&format!("{}: {},", name, value));
}
pub fn field_decl(&mut self, name: &str, field_type: &str) {
self.write_line(&format!("{}: {},", name, field_type));
}
pub fn pub_field_decl(&mut self, name: &str, field_type: &str) {
self.write_line(&format!("pub {}: {},", name, field_type));
}
pub fn field_decl_vis(&mut self, vis: Visibility, name: &str, field_type: &str) {
match vis {
Visibility::Public => self.pub_field_decl(name, field_type),
Visibility::Default => self.field_decl(name, field_type),
}
}
pub fn derive(&mut self, derive: &[&str]) {
let v: Vec<String> = derive.iter().map(|&s| s.to_string()).collect();
self.write_line(&format!("#[derive({})]", v.join(",")));
}
pub fn allow(&mut self, what: &[&str]) {
let v: Vec<String> = what.iter().map(|&s| s.to_string()).collect();
self.write_line(&format!("#[allow({})]", v.join(",")));
}
pub fn comment(&mut self, comment: &str) {
if comment.is_empty() {
self.write_line("//");
} else {
self.write_line(&format!("// {}", comment));
}
}
pub fn fn_def(&mut self, sig: &str) {
self.write_line(&format!("fn {};", sig));
}
pub fn fn_block<F>(&mut self, public: bool, sig: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
if public {
self.expr_block(&format!("pub fn {}", sig), cb);
} else {
self.expr_block(&format!("fn {}", sig), cb);
}
}
pub fn pub_fn<F>(&mut self, sig: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.fn_block(true, sig, cb);
}
pub fn def_fn<F>(&mut self, sig: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.fn_block(false, sig, cb);
}
pub fn def_mod<F>(&mut self, name: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("mod {}", name), cb)
}
pub fn pub_mod<F>(&mut self, name: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("pub mod {}", name), cb)
}
pub fn while_block<S : AsRef<str>, F>(&mut self, cond: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("while {}", cond.as_ref()), cb);
}
// if ... { ... }
pub fn if_stmt<S : AsRef<str>, F>(&mut self, cond: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("if {}", cond.as_ref()), cb);
}
// if ... {} else { ... }
pub fn if_else_stmt<S : AsRef<str>, F>(&mut self, cond: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.write_line(&format!("if {} {{", cond.as_ref()));
self.write_line("} else {");
self.indented(cb);
self.write_line("}");
}
// if let ... = ... { ... }
pub fn if_let_stmt<F>(&mut self, decl: &str, expr: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.if_stmt(&format!("let {} = {}", decl, expr), cb);
}
// if let ... = ... { } else { ... }
pub fn if_let_else_stmt<F>(&mut self, decl: &str, expr: &str, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.if_else_stmt(&format!("let {} = {}", decl, expr), cb);
}
pub fn for_stmt<S1 : AsRef<str>, S2 : AsRef<str>, F>(&mut self, over: S1, varn: S2, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.stmt_block(&format!("for {} in {}", varn.as_ref(), over.as_ref()), cb)
}
pub fn match_block<S : AsRef<str>, F>(&mut self, value: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.stmt_block(&format!("match {}", value.as_ref()), cb);
}
pub fn match_expr<S : AsRef<str>, F>(&mut self, value: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.expr_block(&format!("match {}", value.as_ref()), cb);
}
pub fn case_block<S : AsRef<str>, F>(&mut self, cond: S, cb: F)
where
F : Fn(&mut CodeWriter),
{
self.block(&format!("{} => {{", cond.as_ref()), "},", cb);
}
pub fn case_expr<S1 : AsRef<str>, S2 : AsRef<str>>(&mut self, cond: S1, body: S2) {
self.write_line(&format!("{} => {},", cond.as_ref(), body.as_ref()));
}
pub fn error_unexpected_wire_type(&mut self, wire_type: &str) {
self.write_line(&format!(
"return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type({}));",
wire_type
));
}
pub fn assert_wire_type(&mut self, wire_type: wire_format::WireType) {
self.if_stmt(&format!("wire_type != ::protobuf::wire_format::{:?}", wire_type), |w| {
w.error_unexpected_wire_type("wire_type");
});
}
}
| 29.108247 | 96 | 0.518948 |
1d121216a682548c211d404f4424f5188576736c | 4,528 | //! Contains functions to create the text-based interface.
use cursive::views::{Dialog, DummyView, LinearLayout, Panel, TextView};
use cursive::Cursive;
use crate::config;
use crate::types::{Events, SyncedDevice, SyncedFolder, Viewmodel};
use crate::viewmodel;
/// Creates the text-based interface using curses.
///
/// # Example
///
/// ```
/// show_dashboard();
/// ```
pub fn show_dashboard() {
// Create the cursive root
let mut siv = cursive::default();
// Add the global commands
siv.add_global_callback('r', reload_config);
siv.add_global_callback('s', refresh_connection_statuses);
siv.add_global_callback('q', |s| s.quit());
// Show notice about being under development
show_development_notice(&mut siv);
// Start the event loop
siv.run();
}
/// Shows the note about being under development.
///
/// # Example
///
/// ```
/// show_development_notice();
/// ```
fn show_development_notice(s: &mut Cursive) {
s.add_layer(
Dialog::text("Synchronice is still under development!")
.title("Notice")
.button("Proceed", start),
);
}
/// Loads data and shows the dashboard.
///
/// # Example
///
/// ```
/// start();
/// ```
fn start(s: &mut Cursive) {
s.pop_layer();
let connection = config::get_connection();
// Check for connection details
if connection.apikey == "" || connection.address == "" {
s.add_layer(
Dialog::text("Could not fetch connection details!")
.title("Error")
.button("Quit", |s| s.quit()),
);
}
// Init viewmodel
viewmodel::init();
// Load interface
reload_config(s);
}
/// Refreshes the list of folders and devices.
///
/// # Example
///
/// ```
/// reload_config(s);
/// ```
fn reload_config(s: &mut Cursive) {
// Remove previous layer
s.pop_layer();
// Replace with new layer
s.add_layer(get_updated_dashboard(true));
}
/// Refreshes the connection statuses.
///
/// # Example
///
/// ```
/// refresh_connection_statuses(s);
/// ```
fn refresh_connection_statuses(s: &mut Cursive) {
// Remove previous layer
s.pop_layer();
// Replace with new layer
s.add_layer(get_updated_dashboard(false));
}
/// Returns a new or updated dashboard layer.
///
/// # Example
///
/// ```
/// get_updated_dashboard(true)
/// ```
fn get_updated_dashboard(is_initial_load: bool) -> Dialog {
// Construct latest viewmodel
viewmodel::refresh_viewmodel(is_initial_load);
// Get display layouts
let (folders_layout, devices_layout) = get_display_layouts(&viewmodel::get_data());
// Return latest dashboard layer
get_dashboard_layer(folders_layout, devices_layout)
}
/// Gets a tuple of data-filled layouts.
///
/// # Example
///
/// ```
/// get_display_layouts(viewmodel);
/// ```
fn get_display_layouts(viewmodel: &Viewmodel) -> (LinearLayout, LinearLayout) {
// Create folders and devices layouts
let mut folders_layout: LinearLayout = LinearLayout::vertical();
let mut devices_layout: LinearLayout = LinearLayout::vertical();
// Populate list of folders
for folder in &viewmodel.synced_folders {
folders_layout.add_child(create_folder_view(&folder));
}
// Populate list of devices
for device in &viewmodel.synced_devices {
devices_layout.add_child(create_device_view(&device));
}
(folders_layout, devices_layout)
}
/// Creates a folder view
///
/// # Example
///
/// ```
/// create_folder_view(folder);
/// ```
fn create_folder_view(folder: &SyncedFolder) -> Panel<TextView> {
Panel::new(TextView::new(folder.label))
}
/// Creates a device view
///
/// # Example
///
/// ```
/// create_device_view(device);
/// ```
fn create_device_view(device: &SyncedDevice) -> Panel<TextView> {
Panel::new(TextView::new(device.name))
}
/// Creates an updated layer based on latest viewmodel.
///
/// # Example
///
/// ```
/// get_dashboard_layer(folders_layout, devices_layout);
/// ```
fn get_dashboard_layer(folders_layout: LinearLayout, devices_layout: LinearLayout) -> Dialog {
Dialog::around(
LinearLayout::vertical().child(DummyView).child(
LinearLayout::horizontal()
.child(Dialog::around(folders_layout).title("Folders"))
.child(Dialog::around(devices_layout).title("Devices")),
),
)
.title("Synchronice")
.button("(R)eload", reload_config)
.button("Refre(S)h", refresh_connection_statuses)
.button("(Q)uit", |s| s.quit())
}
| 23.706806 | 94 | 0.640018 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.