/// Replaces `*input` with `limit` if `limit` is smaller; returns `true` if a change was made.
pub fn chmin<T>(input: &mut T, limit: T) -> bool
where
T: PartialOrd,
{
if *input > limit {
*input = limit;
true
} else {
false
}
}
/// Replaces `*input` with `limit` if `limit` is larger; returns `true` if a change was made.
pub fn chmax<T>(input: &mut T, limit: T) -> bool
where
T: PartialOrd,
{
if *input < limit {
*input = limit;
true
} else {
false
}
}
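// Illustrative usage sketch (added; not part of the original file). The
// values are hypothetical; `chmin`/`chmax` track running extrema and report
// whether the tracked value changed.
#[cfg(test)]
mod chmin_chmax_example {
    use super::{chmax, chmin};

    #[test]
    fn tracks_running_extrema() {
        let mut best = i64::MAX;
        let mut worst = i64::MIN;
        for &cost in &[5_i64, 3, 7] {
            chmin(&mut best, cost); // keep the smallest value seen so far
            chmax(&mut worst, cost); // keep the largest value seen so far
        }
        assert_eq!(best, 3);
        assert_eq!(worst, 7);
    }
}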
---
// This file is part of Substrate.
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Benchmarks for the Session Pallet.
// This is separated into its own crate due to cyclic dependency issues.
#![cfg_attr(not(feature = "std"), no_std)]
mod mock;
use sp_std::prelude::*;
use sp_std::vec;
use frame_benchmarking::benchmarks;
use frame_support::{
codec::Decode,
storage::StorageValue,
traits::{KeyOwnerProofSystem, OnInitialize},
};
use frame_system::RawOrigin;
use pallet_session::{historical::Module as Historical, Module as Session, *};
use pallet_staking::{
benchmarking::create_validator_with_nominators, testing_utils::create_validators,
MAX_NOMINATIONS,
};
use sp_runtime::traits::{One, StaticLookup};
const MAX_VALIDATORS: u32 = 1000;
pub struct Module<T: Trait>(pallet_session::Module<T>);
pub trait Trait: pallet_session::Trait + pallet_session::historical::Trait + pallet_staking::Trait {}
impl<T: Trait> OnInitialize<T::BlockNumber> for Module<T> {
fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight {
pallet_session::Module::<T>::on_initialize(n)
}
}
benchmarks! {
_ { }
set_keys {
let n in 1 .. MAX_NOMINATIONS as u32;
let v_stash = create_validator_with_nominators::<T>(n, MAX_NOMINATIONS as u32, false)?;
let v_controller = pallet_staking::Module::<T>::bonded(&v_stash).ok_or("not stash")?;
let keys = T::Keys::default();
let proof: Vec<u8> = vec![0,1,2,3];
}: _(RawOrigin::Signed(v_controller), keys, proof)
purge_keys {
let n in 1 .. MAX_NOMINATIONS as u32;
let v_stash = create_validator_with_nominators::<T>(n, MAX_NOMINATIONS as u32, false)?;
let v_controller = pallet_staking::Module::<T>::bonded(&v_stash).ok_or("not stash")?;
let keys = T::Keys::default();
let proof: Vec<u8> = vec![0,1,2,3];
Session::<T>::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?;
}: _(RawOrigin::Signed(v_controller))
check_membership_proof_current_session {
let n in 2 .. MAX_VALIDATORS as u32;
let (key, key_owner_proof1) = check_membership_proof_setup::<T>(n);
let key_owner_proof2 = key_owner_proof1.clone();
}: {
Historical::<T>::check_proof(key, key_owner_proof1);
}
verify {
assert!(Historical::<T>::check_proof(key, key_owner_proof2).is_some());
}
check_membership_proof_historical_session {
let n in 2 .. MAX_VALIDATORS as u32;
let (key, key_owner_proof1) = check_membership_proof_setup::<T>(n);
// skip to the next session so that the session is historical
// and the membership merkle proof must be checked.
Session::<T>::rotate_session();
let key_owner_proof2 = key_owner_proof1.clone();
}: {
Historical::<T>::check_proof(key, key_owner_proof1);
}
verify {
assert!(Historical::<T>::check_proof(key, key_owner_proof2).is_some());
}
}
/// Sets up the benchmark for checking a membership proof. It creates the given
/// number of validators, sets random session keys and then creates a membership
/// proof for the first authority and returns its key and the proof.
fn check_membership_proof_setup<T: Trait>(
n: u32,
) -> (
(sp_runtime::KeyTypeId, &'static [u8; 32]),
sp_session::MembershipProof,
) {
pallet_staking::ValidatorCount::put(n);
// create validators and set random session keys
for (n, who) in create_validators::<T>(n, 1000)
.unwrap()
.into_iter()
.enumerate()
{
use rand::RngCore;
use rand::SeedableRng;
let validator = T::Lookup::lookup(who).unwrap();
let controller = pallet_staking::Module::<T>::bonded(validator).unwrap();
let keys = {
let mut keys = [0u8; 128];
// we keep the keys for the first validator as 0x00000...
if n > 0 {
let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64);
rng.fill_bytes(&mut keys);
}
keys
};
let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap();
let proof: Vec<u8> = vec![];
Session::<T>::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap();
}
Module::<T>::on_initialize(T::BlockNumber::one());
// skip sessions until the new validator set is enacted
while Session::<T>::validators().len() < n as usize {
Session::<T>::rotate_session();
}
let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]);
(key, Historical::<T>::prove(key).unwrap())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mock::{new_test_ext, Test};
use frame_support::assert_ok;
#[test]
fn test_benchmarks() {
new_test_ext().execute_with(|| {
assert_ok!(test_benchmark_set_keys::<Test>());
assert_ok!(test_benchmark_purge_keys::<Test>());
});
}
}
---
use hdrhistogram::serialization::interval_log;
use hdrhistogram::Histogram;
use std::time::Duration;
include!("../../../../baseline/memcached_ffi_wrappers.rs");
#[derive(Default, Clone)]
pub struct Timeline {
// these are logarithmically spaced
// the first histogram is 0-1s after start, the second 1-2s after start, then 2-4s, etc.
histograms: Vec<Histograms>,
total_duration: Duration,
}
#[derive(Clone)]
pub struct Histograms {
processing: Histogram<u64>,
sojourn: Histogram<u64>,
}
impl Default for Histograms {
fn default() -> Self {
Self {
processing: Histogram::new_with_bounds(1, 60_000_000, 3).unwrap(),
sojourn: Histogram::new_with_bounds(1, 60_000_000, 3).unwrap(),
}
}
}
impl Histograms {
pub fn processing(&mut self, time: u64) {
self.processing.saturating_record(time);
}
pub fn sojourn(&mut self, time: u64) {
self.sojourn.saturating_record(time);
}
pub fn merge(&mut self, other: &Self) {
self.processing.add(&other.processing).expect("same bounds");
self.sojourn.add(&other.sojourn).expect("same bounds");
}
}
impl Timeline {
pub fn set_total_duration(&mut self, total: Duration) {
self.total_duration = total;
}
pub fn histogram_for(&mut self, issued_at: Duration) -> &mut Histograms {
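        // Worked example of the bucket math below (comments added for clarity):
        // issued_at = 0.5s -> ceil(0.5 + eps) = 1 -> next_power_of_two = 1
        //   -> trailing_zeros = 0 -> bucket 0, covering [0, 1)s;
        // issued_at = 2.5s -> ceil = 3 -> next_power_of_two = 4
        //   -> trailing_zeros = 2 -> bucket 2, covering [2, 4)s.
        // The tiny epsilon nudges exact boundaries up into the next bucket,
        // e.g. issued_at = 1.0s -> ceil(1.0 + eps) = 2 -> bucket 1 ([1, 2)s).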
let hist = ((issued_at.as_secs_f64() + 0.000000000001).ceil() as usize)
.next_power_of_two()
.trailing_zeros() as usize;
if hist >= self.histograms.len() {
self.histograms.resize(hist + 1, Histograms::default());
}
self.histograms.get_mut(hist).unwrap()
}
pub fn merge(&mut self, other: &Self) {
for (ti, other_hs) in other.histograms.iter().enumerate() {
if let Some(self_hs) = self.histograms.get_mut(ti) {
self_hs.merge(other_hs);
} else {
self.histograms.push(other_hs.clone());
}
}
}
pub fn write<W: std::io::Write, S: hdrhistogram::serialization::Serializer>(
&self,
w: &mut interval_log::IntervalLogWriter<W, S>,
) -> std::result::Result<(), interval_log::IntervalLogWriterError<S::SerializeError>> {
let proc_tag = interval_log::Tag::new("processing").unwrap();
let sjrn_tag = interval_log::Tag::new("sojourn").unwrap();
for (i, hs) in self.histograms.iter().enumerate() {
let start = Duration::from_secs((1 << i) >> 1);
let mut dur = Duration::from_secs(1 << i) - start;
if self.total_duration != Duration::new(0, 0) && start + dur > self.total_duration {
dur = self.total_duration - start;
}
w.write_histogram(&hs.processing, start, dur, Some(proc_tag))?;
w.write_histogram(&hs.sojourn, start, dur, Some(sjrn_tag))?;
}
Ok(())
}
pub fn last(&self) -> Option<(&Histogram<u64>, &Histogram<u64>)> {
self.histograms.last().map(|h| (&h.processing, &h.sojourn))
}
pub fn collapse(&self) -> (Histogram<u64>, Histogram<u64>) {
let mut hists = self.histograms.iter();
if let Some(hs) = hists.next() {
let mut proc = hs.processing.clone();
let mut sjrn = hs.sojourn.clone();
for hs in hists {
proc.add(&hs.processing).expect("same bounds");
sjrn.add(&hs.sojourn).expect("same bounds");
}
(proc, sjrn)
} else {
(Histogram::new(1).unwrap(), Histogram::new(1).unwrap())
}
}
}
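// A small illustrative test (added; not in the original file) checking the
// log-spaced bucketing described above: 0-1s -> bucket 0, 1-2s -> bucket 1,
// 2-4s -> bucket 2, and so on.
#[cfg(test)]
mod timeline_bucket_example {
    use super::Timeline;
    use std::time::Duration;

    #[test]
    fn log_spaced_buckets() {
        let mut tl = Timeline::default();
        tl.histogram_for(Duration::from_millis(500)).processing(10);
        assert_eq!(tl.histograms.len(), 1); // only bucket 0 so far
        tl.histogram_for(Duration::from_secs(3)).processing(10);
        assert_eq!(tl.histograms.len(), 3); // buckets 0, 1 and 2 now exist
    }
}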
---
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::{CliValidatorInfo, CliValidatorInfoVec},
};
use bincode::deserialize;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use reqwest::blocking::Client;
use serde_derive::{Deserialize, Serialize};
use serde_json::{Map, Value};
use solana_clap_utils::{
input_parsers::pubkey_of,
input_validators::{is_pubkey, is_url},
keypair::signer_from_path,
};
use solana_client::rpc_client::RpcClient;
use solana_config_program::{config_instruction, get_config_data, ConfigKeys, ConfigState};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account::Account,
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::{error, sync::Arc};
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
info: String,
}
impl ConfigState for ValidatorInfo {
fn max_space() -> u64 {
MAX_VALIDATOR_INFO
}
}
// Return an error if a validator's details are longer than the max length.
pub fn check_details_length(string: String) -> Result<(), String> {
if string.len() > MAX_LONG_FIELD_LENGTH {
Err(format!(
"validator details longer than {:?}-byte limit",
MAX_LONG_FIELD_LENGTH
))
} else {
Ok(())
}
}
// Return an error if url field is too long or cannot be parsed.
pub fn check_url(string: String) -> Result<(), String> {
is_url(string.clone())?;
if string.len() > MAX_SHORT_FIELD_LENGTH {
Err(format!(
"url longer than {:?}-byte limit",
MAX_SHORT_FIELD_LENGTH
))
} else {
Ok(())
}
}
// Return an error if a validator field is longer than the max length.
pub fn is_short_field(string: String) -> Result<(), String> {
if string.len() > MAX_SHORT_FIELD_LENGTH {
Err(format!(
"validator field longer than {:?}-byte limit",
MAX_SHORT_FIELD_LENGTH
))
} else {
Ok(())
}
}
fn verify_keybase(
validator_pubkey: &Pubkey,
keybase_username: &Value,
) -> Result<(), Box<dyn error::Error>> {
if let Some(keybase_username) = keybase_username.as_str() {
let url = format!(
"https://keybase.pub/{}/solana/validator-{:?}",
keybase_username, validator_pubkey
);
let client = Client::new();
if client.head(&url).send()?.status().is_success() {
Ok(())
} else {
Err(format!("keybase_username could not be confirmed at: {}. Please add this pubkey file to your keybase profile to connect", url).into())
}
} else {
Err(format!(
"keybase_username could not be parsed as String: {}",
keybase_username
)
.into())
}
}
fn parse_args(matches: &ArgMatches<'_>) -> Value {
let mut map = Map::new();
map.insert(
"name".to_string(),
Value::String(matches.value_of("name").unwrap().to_string()),
);
if let Some(url) = matches.value_of("website") {
map.insert("website".to_string(), Value::String(url.to_string()));
}
if let Some(details) = matches.value_of("details") {
map.insert("details".to_string(), Value::String(details.to_string()));
}
if let Some(keybase_username) = matches.value_of("keybase_username") {
map.insert(
"keybaseUsername".to_string(),
Value::String(keybase_username.to_string()),
);
}
Value::Object(map)
}
fn parse_validator_info(
pubkey: &Pubkey,
account: &Account,
) -> Result<(Pubkey, Map<String, serde_json::value::Value>), Box<dyn error::Error>> {
if account.owner != solana_config_program::id() {
return Err(format!("{} is not a validator info account", pubkey).into());
}
    let key_list: ConfigKeys = deserialize(&account.data)?;
    // The keys are laid out as [(config program id, false), (validator identity, true)],
    // so the validator identity lives at index 1; check the length before indexing.
    if key_list.keys.len() > 1 {
        let (validator_pubkey, _) = key_list.keys[1];
let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?;
let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?;
Ok((validator_pubkey, validator_info))
} else {
Err(format!("{} could not be parsed as a validator info account", pubkey).into())
}
}
pub trait ValidatorInfoSubCommands {
fn validator_info_subcommands(self) -> Self;
}
impl ValidatorInfoSubCommands for App<'_, '_> {
fn validator_info_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("validator-info")
.about("Publish/get Validator info on Solana")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(
SubCommand::with_name("publish")
.about("Publish Validator info on Solana")
.arg(
Arg::with_name("info_pubkey")
.short("p")
.long("info-pubkey")
.value_name("PUBKEY")
.takes_value(true)
.validator(is_pubkey)
.help("The pubkey of the Validator info account to update"),
)
.arg(
Arg::with_name("name")
.index(1)
.value_name("NAME")
.takes_value(true)
.required(true)
.validator(is_short_field)
.help("Validator name"),
)
.arg(
Arg::with_name("website")
.short("w")
.long("website")
.value_name("URL")
.takes_value(true)
.validator(check_url)
.help("Validator website url"),
)
.arg(
Arg::with_name("keybase_username")
.short("n")
.long("keybase")
.value_name("USERNAME")
.takes_value(true)
.validator(is_short_field)
.help("Validator Keybase username"),
)
.arg(
Arg::with_name("details")
.short("d")
.long("details")
.value_name("DETAILS")
.takes_value(true)
.validator(check_details_length)
.help("Validator description")
)
.arg(
Arg::with_name("force")
.long("force")
.takes_value(false)
.hidden(true) // Don't document this argument to discourage its use
.help("Override keybase username validity check"),
),
)
.subcommand(
SubCommand::with_name("get")
.about("Get and parse Solana Validator info")
.arg(
Arg::with_name("info_pubkey")
.index(1)
.value_name("PUBKEY")
.takes_value(true)
.validator(is_pubkey)
.help("The pubkey of the Validator info account; without this argument, returns all"),
),
)
)
}
}
pub fn parse_validator_info_command(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let info_pubkey = pubkey_of(matches, "info_pubkey");
// Prepare validator info
let validator_info = parse_args(matches);
Ok(CliCommandInfo {
command: CliCommand::SetValidatorInfo {
validator_info,
force_keybase: matches.is_present("force"),
info_pubkey,
},
signers: vec![signer_from_path(
matches,
default_signer_path,
"keypair",
wallet_manager,
)?],
})
}
pub fn parse_get_validator_info_command(
matches: &ArgMatches<'_>,
) -> Result<CliCommandInfo, CliError> {
let info_pubkey = pubkey_of(matches, "info_pubkey");
Ok(CliCommandInfo {
command: CliCommand::GetValidatorInfo(info_pubkey),
signers: vec![],
})
}
pub fn process_set_validator_info(
rpc_client: &RpcClient,
config: &CliConfig,
validator_info: &Value,
force_keybase: bool,
info_pubkey: Option<Pubkey>,
) -> ProcessResult {
// Validate keybase username
if let Some(string) = validator_info.get("keybaseUsername") {
let result = verify_keybase(&config.signers[0].pubkey(), &string);
if result.is_err() {
if force_keybase {
println!("--force supplied, ignoring: {:?}", result);
} else {
result.map_err(|err| {
CliError::BadParameter(format!("Invalid validator keybase username: {}", err))
})?;
}
}
}
let validator_string = serde_json::to_string(&validator_info).unwrap();
let validator_info = ValidatorInfo {
info: validator_string,
};
// Check for existing validator-info account
let all_config = rpc_client.get_program_accounts(&solana_config_program::id())?;
let existing_account = all_config
.iter()
        .filter(|(_, account)| {
            // Skip accounts whose data does not deserialize as ConfigKeys
            // rather than panicking on them.
            match deserialize::<ConfigKeys>(&account.data) {
                Ok(key_list) => key_list.keys.contains(&(id(), false)),
                Err(_) => false,
            }
        })
        .find(|(pubkey, account)| {
            // Treat accounts that fail to parse as non-matches instead of panicking.
            parse_validator_info(pubkey, account)
                .map(|(validator_pubkey, _)| validator_pubkey == config.signers[0].pubkey())
                .unwrap_or(false)
        });
// Create validator-info keypair to use if info_pubkey not provided or does not exist
let info_keypair = Keypair::new();
let mut info_pubkey = if let Some(pubkey) = info_pubkey {
pubkey
} else if let Some(validator_info) = existing_account {
validator_info.0
} else {
info_keypair.pubkey()
};
// Check existence of validator-info account
let balance = rpc_client
.poll_get_balance_with_commitment(&info_pubkey, CommitmentConfig::default())
.unwrap_or(0);
let keys = vec![(id(), false), (config.signers[0].pubkey(), true)];
let (message, signers): (Message, Vec<&dyn Signer>) = if balance == 0 {
if info_pubkey != info_keypair.pubkey() {
println!(
"Account {:?} does not exist. Generating new keypair...",
info_pubkey
);
info_pubkey = info_keypair.pubkey();
}
println!(
"Publishing info for Validator {:?}",
config.signers[0].pubkey()
);
let lamports = rpc_client
.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let mut instructions = config_instruction::create_account::<ValidatorInfo>(
&config.signers[0].pubkey(),
&info_keypair.pubkey(),
lamports,
keys.clone(),
);
instructions.extend_from_slice(&[config_instruction::store(
&info_keypair.pubkey(),
true,
keys,
&validator_info,
)]);
let signers = vec![config.signers[0], &info_keypair];
let message = Message::new(&instructions);
(message, signers)
} else {
println!(
"Updating Validator {:?} info at: {:?}",
config.signers[0].pubkey(),
info_pubkey
);
let instructions = vec![config_instruction::store(
&info_pubkey,
false,
keys,
&validator_info,
)];
let message = Message::new_with_payer(&instructions, Some(&config.signers[0].pubkey()));
let signers = vec![config.signers[0]];
(message, signers)
};
// Submit transaction
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
println!("Success! Validator info published at: {:?}", info_pubkey);
println!("{}", signature_str);
Ok("".to_string())
}
pub fn process_get_validator_info(
rpc_client: &RpcClient,
config: &CliConfig,
pubkey: Option<Pubkey>,
) -> ProcessResult {
let validator_info: Vec<(Pubkey, Account)> = if let Some(validator_info_pubkey) = pubkey {
vec![(
validator_info_pubkey,
rpc_client.get_account(&validator_info_pubkey)?,
)]
} else {
let all_config = rpc_client.get_program_accounts(&solana_config_program::id())?;
all_config
.into_iter()
            .filter(|(_, validator_info_account)| {
                // Skip accounts that fail to deserialize rather than panicking.
                match deserialize::<ConfigKeys>(&validator_info_account.data) {
                    Ok(key_list) => key_list.keys.contains(&(id(), false)),
                    Err(_) => false,
                }
            })
.collect()
};
let mut validator_info_list: Vec<CliValidatorInfo> = vec![];
if validator_info.is_empty() {
println!("No validator info accounts found");
}
for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
let (validator_pubkey, validator_info) =
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
validator_info_list.push(CliValidatorInfo {
identity_pubkey: validator_pubkey.to_string(),
info_pubkey: validator_info_pubkey.to_string(),
info: validator_info,
});
}
config
.output_format
.formatted_print(&CliValidatorInfoVec::new(validator_info_list));
Ok("".to_string())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cli::app;
use bincode::{serialize, serialized_size};
use serde_json::json;
#[test]
fn test_check_url() {
let url = "http://test.com";
assert_eq!(check_url(url.to_string()), Ok(()));
let long_url = "http://7cLvFwLCbyHuXQ1RGzhCMobAWYPMSZ3VbUml1qWi1nkc3FD7zj9hzTZzMvYJ.com";
assert!(check_url(long_url.to_string()).is_err());
let non_url = "not parseable";
assert!(check_url(non_url.to_string()).is_err());
}
#[test]
fn test_is_short_field() {
let name = "Alice Validator";
assert_eq!(is_short_field(name.to_string()), Ok(()));
let long_name = "Alice 7cLvFwLCbyHuXQ1RGzhCMobAWYPMSZ3VbUml1qWi1nkc3FD7zj9hzTZzMvYJt6rY9";
assert!(is_short_field(long_name.to_string()).is_err());
}
#[test]
fn test_parse_args() {
let matches = app("test", "desc", "version").get_matches_from(vec![
"test",
"validator-info",
"publish",
"Alice",
"-n",
"alice_keybase",
]);
let subcommand_matches = matches.subcommand();
assert_eq!(subcommand_matches.0, "validator-info");
assert!(subcommand_matches.1.is_some());
let subcommand_matches = subcommand_matches.1.unwrap().subcommand();
assert_eq!(subcommand_matches.0, "publish");
assert!(subcommand_matches.1.is_some());
let matches = subcommand_matches.1.unwrap();
let expected = json!({
"name": "Alice",
"keybaseUsername": "alice_keybase",
});
assert_eq!(parse_args(&matches), expected);
}
#[test]
fn test_validator_info_serde() {
let mut info = Map::new();
info.insert("name".to_string(), Value::String("Alice".to_string()));
let info_string = serde_json::to_string(&Value::Object(info)).unwrap();
let validator_info = ValidatorInfo {
info: info_string.clone(),
};
assert_eq!(serialized_size(&validator_info).unwrap(), 24);
assert_eq!(
serialize(&validator_info).unwrap(),
vec![
16, 0, 0, 0, 0, 0, 0, 0, 123, 34, 110, 97, 109, 101, 34, 58, 34, 65, 108, 105, 99,
101, 34, 125
]
);
let deserialized: ValidatorInfo = deserialize(&[
16, 0, 0, 0, 0, 0, 0, 0, 123, 34, 110, 97, 109, 101, 34, 58, 34, 65, 108, 105, 99, 101,
34, 125,
])
.unwrap();
assert_eq!(deserialized.info, info_string);
}
#[test]
fn test_parse_validator_info() {
let pubkey = Pubkey::new_rand();
let keys = vec![(id(), false), (pubkey, true)];
let config = ConfigKeys { keys };
let mut info = Map::new();
info.insert("name".to_string(), Value::String("Alice".to_string()));
let info_string = serde_json::to_string(&Value::Object(info.clone())).unwrap();
let validator_info = ValidatorInfo {
info: info_string.clone(),
};
let data = serialize(&(config, validator_info)).unwrap();
assert_eq!(
parse_validator_info(
&Pubkey::default(),
&Account {
owner: solana_config_program::id(),
data,
..Account::default()
}
)
.unwrap(),
(pubkey, info)
);
}
#[test]
fn test_validator_info_max_space() {
// 70-character string
let max_short_string =
"Max Length String KWpP299aFCBWvWg1MHpSuaoTsud7cv8zMJsh99aAtP8X1s26yrR1".to_string();
// 300-character string
let max_long_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut libero quam, volutpat et aliquet eu, varius in mi. Aenean vestibulum ex in tristique faucibus. Maecenas in imperdiet turpis. Nullam feugiat aliquet erat. Morbi malesuada turpis sed dui pulvinar lobortis. Pellentesque a lectus eu leo nullam.".to_string();
let mut info = Map::new();
info.insert("name".to_string(), Value::String(max_short_string.clone()));
info.insert(
"website".to_string(),
Value::String(max_short_string.clone()),
);
info.insert(
"keybaseUsername".to_string(),
Value::String(max_short_string),
);
info.insert("details".to_string(), Value::String(max_long_string));
let info_string = serde_json::to_string(&Value::Object(info)).unwrap();
let validator_info = ValidatorInfo {
info: info_string.clone(),
};
assert_eq!(
serialized_size(&validator_info).unwrap(),
ValidatorInfo::max_space()
);
}
}
---
use ndarray_stats::errors::MinMaxError;
use thiserror::Error;
/// Simplified `Result` using [`NaiveBayesError`](crate::NaiveBayesError) as error type
pub type Result<T> = std::result::Result<T, NaiveBayesError>;
/// Error variants from hyper-parameter construction or model estimation
#[derive(Error, Debug)]
pub enum NaiveBayesError {
/// Error when performing Max operation on data
#[error("invalid statistical operation {0}")]
Stats(#[from] MinMaxError),
/// Invalid smoothing parameter
#[error("invalid smoothing parameter {0}")]
InvalidSmoothing(f64),
    /// Error propagated from the core crate
    #[error(transparent)]
    CoreError(#[from] crate::core::Error),
}
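// Illustrative sketch (added; not part of the original file): a hypothetical
// validator showing how the `Result` alias and the `InvalidSmoothing` variant
// compose.
#[cfg(test)]
mod smoothing_example {
    use super::{NaiveBayesError, Result};

    fn check_smoothing(alpha: f64) -> Result<f64> {
        if alpha > 0.0 {
            Ok(alpha)
        } else {
            Err(NaiveBayesError::InvalidSmoothing(alpha))
        }
    }

    #[test]
    fn rejects_non_positive_alpha() {
        assert!(check_smoothing(1.0).is_ok());
        assert!(check_smoothing(0.0).is_err());
    }
}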
---
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {fidl_fuchsia_bluetooth_sys::TechnologyType, std::collections::HashMap};
use crate::{
assert_satisfies,
expectation::{Predicate as P, *},
over,
types::{Address, Peer, PeerId},
};
const TEST_PEER_NAME: &'static str = "TestPeer";
const INCORRECT_PEER_NAME: &'static str = "IncorrectPeer";
const TEST_PEER_ADDRESS: [u8; 6] = [1, 0, 0, 0, 0, 0];
const INCORRECT_PEER_ADDRESS: [u8; 6] = [2, 0, 0, 0, 0, 0];
fn correct_name() -> Predicate<Peer> {
peer::name(TEST_PEER_NAME)
}
fn incorrect_name() -> Predicate<Peer> {
peer::name(INCORRECT_PEER_NAME)
}
fn correct_address() -> Predicate<Peer> {
peer::address(Address::Public(TEST_PEER_ADDRESS))
}
fn incorrect_address() -> Predicate<Peer> {
peer::address(Address::Public(INCORRECT_PEER_ADDRESS))
}
fn test_peer() -> Peer {
Peer {
name: Some(TEST_PEER_NAME.into()),
address: Address::Public(TEST_PEER_ADDRESS),
technology: TechnologyType::LowEnergy,
connected: false,
bonded: false,
appearance: None,
id: PeerId(1),
rssi: None,
tx_power: None,
device_class: None,
le_services: vec![],
bredr_services: vec![],
}
}
#[test]
fn test() -> Result<(), DebugString> {
correct_name().assert_satisfied(&test_peer())
}
#[test]
fn simple_predicate_succeeds() {
let predicate =
P::equal(Some(TEST_PEER_NAME.to_string())).over(|dev: &Peer| &dev.name, ".name");
assert!(predicate.satisfied(&test_peer()));
}
#[test]
fn simple_incorrect_predicate_fail() {
let predicate =
P::equal(Some(INCORRECT_PEER_NAME.to_string())).over(|dev: &Peer| &dev.name, ".name");
assert!(!predicate.satisfied(&test_peer()));
}
#[test]
fn predicate_and_both_true_succeeds() {
let predicate = correct_name().and(correct_address());
assert!(predicate.satisfied(&test_peer()));
}
#[test]
fn predicate_and_one_or_more_false_fails() {
let predicate = correct_name().and(incorrect_address());
assert!(!predicate.satisfied(&test_peer()));
let predicate = incorrect_name().and(correct_address());
assert!(!predicate.satisfied(&test_peer()));
let predicate = incorrect_name().and(incorrect_address());
assert!(!predicate.satisfied(&test_peer()));
}
#[test]
fn predicate_or_both_false_fails() {
let predicate = incorrect_name().or(incorrect_address());
assert!(!predicate.satisfied(&test_peer()));
}
#[test]
fn predicate_or_one_or_more_true_succeeds() {
let predicate = correct_name().or(correct_address());
assert_satisfies!(&test_peer(), predicate);
let predicate = incorrect_name().or(correct_address());
assert_satisfies!(&test_peer(), predicate);
let predicate = correct_name().or(incorrect_address());
assert_satisfies!(&test_peer(), predicate);
}
#[test]
fn predicate_not_incorrect_succeeds() {
let predicate = incorrect_name().not();
assert_satisfies!(&test_peer(), predicate);
}
#[test]
fn predicate_not_correct_fails() {
let predicate = correct_name().not();
assert!(!predicate.satisfied(&test_peer()));
}
#[test]
fn incorrect_over_predicate_fails() {
let predicate = over!(Peer: name, P::equal(Some("INCORRECT_NAME".to_string())));
let expected_msg = vec![
"FAILED EXPECTATION",
" .name == Some(\"INCORRECT_NAME\")",
"BECAUSE",
" .name Some(\"TestPeer\") != Some(\"INCORRECT_NAME\")",
]
.join("\n");
assert_eq!(predicate.assert_satisfied(&test_peer()), Err(DebugString(expected_msg)))
}
#[test]
fn incorrect_not_predicate_fails() {
let predicate = over!(Peer: name, P::not_equal(Some(TEST_PEER_NAME.to_string())));
let expected_msg = vec![
"FAILED EXPECTATION",
" .name != Some(\"TestPeer\")",
"BECAUSE",
" .name Some(\"TestPeer\") == Some(\"TestPeer\")",
]
.join("\n");
assert_eq!(predicate.assert_satisfied(&test_peer()), Err(DebugString(expected_msg)))
}
#[test]
fn vec_all_predicate_succeeds() {
let strings = vec!["hello".to_string(), "world".to_string()];
let predicate = P::all(P::not_equal("goodbye".to_string()));
assert_satisfies!(&strings, predicate);
}
#[test]
fn map_keys_iter_all_predicate_succeeds() {
let mut strings: HashMap<String, String> = HashMap::new();
let _ = strings.insert("Hello".to_string(), "World".to_string());
let _ = strings.insert("Goodnight".to_string(), "Moon".to_string());
let predicate = P::iter_all(P::not_equal("goodbye".to_string()));
assert_satisfies!(&strings.keys(), predicate);
}
#[test]
fn map_over_keys_all_predicate_succeeds() {
let mut strings: HashMap<String, String> = HashMap::new();
let _ = strings.insert("Hello".to_string(), "World".to_string());
let _ = strings.insert("Goodnight".to_string(), "Moon".to_string());
let predicate = P::all(P::not_equal("goodbye".to_string())).over_value(
|m: &HashMap<String, String>| m.keys().cloned().collect::<Vec<String>>(),
".keys()",
);
assert_satisfies!(&strings, predicate)
}
// Introduce some example types used in the tests below to validate predicates over structs
/// An example Person
#[derive(Debug, PartialEq, Clone)]
struct Person {
name: String,
age: u64,
}
/// An example Group of Persons
#[derive(Debug, PartialEq, Clone)]
struct Group {
persons: Vec<Person>,
}
#[test]
fn incorrect_compound_all_predicate_fails() {
let test_group = Group {
persons: vec![
Person { name: "Alice".to_string(), age: 40 },
Person { name: "Bob".to_string(), age: 41 },
],
};
let predicate = over!(
Group: persons,
P::all(
over!(Person: name, P::not_equal("Bob".to_string()))
.and(over!(Person: age, P::predicate(|age: &u64| *age < 50, "< 50")))
)
);
let expected_msg = vec![
"FAILED EXPECTATION",
" .persons ALL (.name != \"Bob\") AND (.age < 50)",
"BECAUSE",
" .persons [1] Person { name: \"Bob\", age: 41 } BECAUSE .name \"Bob\" == \"Bob\",",
]
.join("\n");
assert_eq!(predicate.assert_satisfied(&test_group), Err(DebugString(expected_msg)));
}
#[test]
fn incorrect_compound_any_predicate_fails() {
let test_group = Group {
persons: vec![
Person { name: "Alice".to_string(), age: 40 },
Person { name: "Bob".to_string(), age: 41 },
Person { name: "Bob".to_string(), age: 39 },
],
};
let predicate = over!(
Group: persons,
P::any(
over!(Person: name, P::not_equal("Bob".to_string()))
.and(over!(Person: age, P::predicate(|age: &u64| *age > 40, "> 40")))
)
);
let expected_msg = vec![
"FAILED EXPECTATION",
" .persons ANY (.name != \"Bob\") AND (.age > 40)",
"BECAUSE",
" .persons",
" [0] Person { name: \"Alice\", age: 40 } BECAUSE .age NOT > 40,",
" [1] Person { name: \"Bob\", age: 41 } BECAUSE .name \"Bob\" == \"Bob\",",
" [2] Person { name: \"Bob\", age: 39 } BECAUSE (.name \"Bob\" == \"Bob\") AND (.age NOT > 40),"
].join("\n");
assert_eq!(predicate.assert_satisfied(&test_group), Err(DebugString(expected_msg)));
}
---
use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt;
use std::ops::Index;
use std::sync::Arc;
use log::debug;
use pcre2_sys::{
PCRE2_CASELESS, PCRE2_DOTALL, PCRE2_EXTENDED, PCRE2_MULTILINE,
PCRE2_UCP, PCRE2_UTF, PCRE2_NO_UTF_CHECK, PCRE2_UNSET,
PCRE2_NEWLINE_ANYCRLF, PCRE2_PARTIAL_HARD
};
use thread_local::CachedThreadLocal;
use crate::error::Error;
use crate::ffi::{Code, CompileContext, MatchConfig, MatchData};
/// Match represents a single match of a regex in a subject string.
///
/// The lifetime parameter `'s` refers to the lifetime of the matched portion
/// of the subject string.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Match<'s> {
subject: &'s [u8],
start: usize,
end: usize,
}
impl<'s> Match<'s> {
/// Returns the starting byte offset of the match in the subject.
#[inline]
pub fn start(&self) -> usize {
self.start
}
/// Returns the ending byte offset of the match in the subject.
#[inline]
pub fn end(&self) -> usize {
self.end
}
/// Returns the matched portion of the subject string.
#[inline]
pub fn as_bytes(&self) -> &'s [u8] {
&self.subject[self.start..self.end]
}
/// Creates a new match from the given subject string and byte offsets.
fn new(subject: &'s [u8], start: usize, end: usize) -> Match<'s> {
Match { subject, start, end }
}
#[cfg(test)]
fn as_pair(&self) -> (usize, usize) {
(self.start, self.end)
}
}
#[derive(Clone, Debug)]
struct Config {
/// PCRE2_CASELESS
caseless: bool,
/// PCRE2_DOTALL
dotall: bool,
/// PCRE2_EXTENDED
extended: bool,
/// PCRE2_MULTILINE
multi_line: bool,
/// PCRE2_NEWLINE_ANYCRLF
crlf: bool,
/// PCRE2_UCP
ucp: bool,
/// PCRE2_UTF
utf: bool,
/// PCRE2_NO_UTF_CHECK
utf_check: bool,
/// use pcre2_jit_compile
jit: JITChoice,
/// Match-time specific configuration knobs.
match_config: MatchConfig,
}
#[derive(Clone, Debug)]
enum JITChoice {
/// Never do JIT compilation.
Never,
/// Always do JIT compilation and return an error if it fails.
Always,
/// Attempt to do JIT compilation but silently fall back to non-JIT.
Attempt,
}
impl Default for Config {
fn default() -> Config {
Config {
caseless: false,
dotall: false,
extended: false,
multi_line: false,
crlf: false,
ucp: false,
utf: false,
utf_check: true,
jit: JITChoice::Never,
match_config: MatchConfig::default(),
}
}
}
/// A builder for configuring the compilation of a PCRE2 regex.
#[derive(Clone, Debug)]
pub struct RegexBuilder {
config: Config,
}
impl RegexBuilder {
/// Create a new builder with a default configuration.
pub fn new() -> RegexBuilder {
RegexBuilder { config: Config::default() }
}
/// Compile the given pattern into a PCRE regex using the current
/// configuration.
///
/// If there was a problem compiling the pattern, then an error is
/// returned.
pub fn build(&self, pattern: &str) -> Result<Regex, Error> {
let mut options = 0;
if self.config.caseless {
options |= PCRE2_CASELESS;
}
if self.config.dotall {
options |= PCRE2_DOTALL;
}
if self.config.extended {
options |= PCRE2_EXTENDED;
}
if self.config.multi_line {
options |= PCRE2_MULTILINE;
}
if self.config.ucp {
options |= PCRE2_UCP;
options |= PCRE2_UTF;
}
if self.config.utf {
options |= PCRE2_UTF;
}
let mut ctx = CompileContext::new();
if self.config.crlf {
ctx.set_newline(PCRE2_NEWLINE_ANYCRLF)
.expect("PCRE2_NEWLINE_ANYCRLF is a legal value");
}
let mut code = Code::new(pattern, options, ctx)?;
match self.config.jit {
JITChoice::Never => {} // fallthrough
JITChoice::Always => {
code.jit_compile()?;
}
JITChoice::Attempt => {
if let Err(err) = code.jit_compile() {
debug!("JIT compilation failed: {}", err);
}
}
}
let capture_names = code.capture_names()?;
let mut idx = HashMap::new();
for (i, group) in capture_names.iter().enumerate() {
if let Some(ref name) = *group {
idx.insert(name.to_string(), i);
}
}
Ok(Regex {
config: Arc::new(self.config.clone()),
pattern: pattern.to_string(),
code: Arc::new(code),
capture_names: Arc::new(capture_names),
capture_names_idx: Arc::new(idx),
match_data: CachedThreadLocal::new(),
})
}
/// Enables case insensitive matching.
///
/// If the `utf` option is also set, then Unicode case folding is used
/// to determine case insensitivity. When the `utf` option is not set,
/// then only standard ASCII case insensitivity is considered.
///
/// This option corresponds to the `i` flag.
pub fn caseless(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.caseless = yes;
self
}
/// Enables "dot all" matching.
///
/// When enabled, the `.` metacharacter in the pattern matches any
/// character, including `\n`. When disabled (the default), `.` will match
/// any character except for `\n`.
///
/// This option corresponds to the `s` flag.
pub fn dotall(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.dotall = yes;
self
}
/// Enable "extended" mode in the pattern, where whitespace is ignored.
///
/// This option corresponds to the `x` flag.
pub fn extended(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.extended = yes;
self
}
/// Enable multiline matching mode.
///
/// When enabled, the `^` and `$` anchors will match both at the beginning
/// and end of a subject string, in addition to matching at the start of
/// a line and the end of a line. When disabled, the `^` and `$` anchors
/// will only match at the beginning and end of a subject string.
///
/// This option corresponds to the `m` flag.
pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.multi_line = yes;
self
}
/// Enable matching of CRLF as a line terminator.
///
/// When enabled, anchors such as `^` and `$` will match any of the
/// following as a line terminator: `\r`, `\n` or `\r\n`.
///
/// This is disabled by default, in which case, only `\n` is recognized as
/// a line terminator.
pub fn crlf(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.crlf = yes;
self
}
/// Enable Unicode matching mode.
///
/// When enabled, the following patterns become Unicode aware: `\b`, `\B`,
/// `\d`, `\D`, `\s`, `\S`, `\w`, `\W`.
///
/// When set, this implies UTF matching mode. It is not possible to enable
/// Unicode matching mode without enabling UTF matching mode.
///
/// This is disabled by default.
pub fn ucp(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.ucp = yes;
self
}
/// Enable UTF matching mode.
///
/// When enabled, characters are treated as sequences of code units that
/// make up a single codepoint instead of as single bytes. For example,
/// this will cause `.` to match any single UTF-8 encoded codepoint,
/// whereas when this is disabled, `.` will match any single byte (except for `\n` in
/// both cases, unless "dot all" mode is enabled).
///
/// Note that when UTF matching mode is enabled, every search performed
/// will do a UTF-8 validation check, which can impact performance. The
/// UTF-8 check can be disabled via the `disable_utf_check` option, but it
/// is undefined behavior to enable UTF matching mode and search invalid
/// UTF-8.
///
/// This is disabled by default.
pub fn utf(&mut self, yes: bool) -> &mut RegexBuilder {
self.config.utf = yes;
self
}
/// When UTF matching mode is enabled, this will disable the UTF checking
/// that PCRE2 will normally perform automatically. If UTF matching mode
/// is not enabled, then this has no effect.
///
/// UTF checking is enabled by default when UTF matching mode is enabled.
/// If UTF matching mode is enabled and UTF checking is enabled, then PCRE2
/// will return an error if you attempt to search a subject string that is
/// not valid UTF-8.
///
/// # Safety
///
/// It is undefined behavior to disable the UTF check in UTF matching mode
/// and search a subject string that is not valid UTF-8. When the UTF check
/// is disabled, callers must guarantee that the subject string is valid
/// UTF-8.
pub unsafe fn disable_utf_check(&mut self) -> &mut RegexBuilder {
self.config.utf_check = false;
self
}
/// Enable PCRE2's JIT and return an error if it's not available.
///
/// This generally speeds up matching quite a bit. The downside is that it
/// can increase the time it takes to compile a pattern.
///
/// If the JIT isn't available or if JIT compilation returns an error, then
/// regex compilation will fail with the corresponding error.
///
/// This is disabled by default, and always overrides `jit_if_available`.
pub fn jit(&mut self, yes: bool) -> &mut RegexBuilder {
if yes {
self.config.jit = JITChoice::Always;
} else {
self.config.jit = JITChoice::Never;
}
self
}
/// Enable PCRE2's JIT if it's available.
///
/// This generally speeds up matching quite a bit. The downside is that it
/// can increase the time it takes to compile a pattern.
///
/// If the JIT isn't available or if JIT compilation returns an error,
/// then a debug message with the error will be emitted and the regex will
/// otherwise silently fall back to non-JIT matching.
///
/// This is disabled by default, and always overrides `jit`.
pub fn jit_if_available(&mut self, yes: bool) -> &mut RegexBuilder {
if yes {
self.config.jit = JITChoice::Attempt;
} else {
self.config.jit = JITChoice::Never;
}
self
}
/// Set the maximum size of PCRE2's JIT stack, in bytes. If the JIT is
/// not enabled, then this has no effect.
///
/// When `None` is given, no custom JIT stack will be created, and instead,
/// the default JIT stack is used. When the default is used, its maximum
/// size is 32 KB.
///
/// When this is set, then a new JIT stack will be created with the given
/// maximum size as its limit.
///
/// Increasing the stack size can be useful for larger regular expressions.
///
/// By default, this is set to `None`.
pub fn max_jit_stack_size(
&mut self,
bytes: Option<usize>,
) -> &mut RegexBuilder {
self.config.match_config.max_jit_stack_size = bytes;
self
}
}
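// Illustrative builder usage (an added sketch; it only exercises options
// documented above): compile a case-insensitive, multi-line pattern and
// attempt JIT compilation when available.
#[cfg(test)]
mod builder_example {
    use super::RegexBuilder;

    #[test]
    fn builds_with_options() {
        let re = RegexBuilder::new()
            .caseless(true)
            .multi_line(true)
            .jit_if_available(true)
            .build(r"^abc$")
            .unwrap();
        // Case-insensitive matching makes the upper-case subject match.
        assert!(re.is_match(b"ABC").unwrap());
    }
}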
/// A compiled PCRE2 regular expression.
///
/// This regex is safe to use from multiple threads simultaneously. For top
/// performance, it is better to clone a new regex for each thread.
pub struct Regex {
/// The configuration used to build the regex.
config: Arc<Config>,
/// The original pattern string.
pattern: String,
/// The underlying compiled PCRE2 object.
code: Arc<Code>,
/// The capture group names for this regex.
capture_names: Arc<Vec<Option<String>>>,
/// A map from capture group name to capture group index.
capture_names_idx: Arc<HashMap<String, usize>>,
/// Mutable scratch data used by PCRE2 during matching.
///
/// We use the same strategy as Rust's regex crate here, such that each
/// thread gets its own match data to support using a Regex object from
/// multiple threads simultaneously. If some match data doesn't exist for
/// a thread, then a new one is created on demand.
match_data: CachedThreadLocal<RefCell<MatchData>>,
}
impl Clone for Regex {
fn clone(&self) -> Regex {
Regex {
config: Arc::clone(&self.config),
pattern: self.pattern.clone(),
code: Arc::clone(&self.code),
capture_names: Arc::clone(&self.capture_names),
capture_names_idx: Arc::clone(&self.capture_names_idx),
match_data: CachedThreadLocal::new(),
}
}
}
impl fmt::Debug for Regex {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Regex({:?})", self.pattern)
}
}
impl Regex {
/// Compiles a regular expression using the default configuration.
///
/// Once compiled, it can be used repeatedly to search, split or replace
/// text in a string.
///
/// If an invalid expression is given, then an error is returned.
///
/// To configure compilation options for the regex, use the
/// [`RegexBuilder`](struct.RegexBuilder.html).
pub fn new(pattern: &str) -> Result<Regex, Error> {
RegexBuilder::new().build(pattern)
}
/// Returns true if and only if the regex matches the subject string given.
///
/// # Example
///
/// Test if some text contains at least one word with exactly 13 ASCII word
/// bytes:
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let text = b"I categorically deny having triskaidekaphobia.";
/// assert!(Regex::new(r"\b\w{13}\b")?.is_match(text)?);
/// # Ok(()) }; example().unwrap()
/// ```
pub fn is_match(&self, subject: &[u8]) -> Result<bool, Error> {
self.is_match_at(subject, 0)
}
/// Returns true if and only if the regex fully or partially matches the subject string given.
/// A partial match occurs when there is a match up to the end of a subject string,
/// but more characters are needed to match the entire pattern.
///
/// # Example
///
/// Test if given string can be a beginning of a valid telephone number:
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let text = b"123-456-";
/// assert!(Regex::new(r"^\d{3}-\d{3}-\d{3}")?.is_partial_match(text)?);
/// # Ok(()) }; example().unwrap()
/// ```
pub fn is_partial_match(&self, subject: &[u8]) -> Result<bool, Error> {
self.is_partial_match_at(subject, 0)
}
/// Returns the start and end byte range of the leftmost-first match in
/// `subject`. If no match exists, then `None` is returned.
///
/// # Example
///
/// Find the start and end location of the first word with exactly 13
/// ASCII word bytes:
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let text = b"I categorically deny having triskaidekaphobia.";
/// let mat = Regex::new(r"\b\w{13}\b")?.find(text)?.unwrap();
/// assert_eq!((mat.start(), mat.end()), (2, 15));
/// # Ok(()) }; example().unwrap()
/// ```
pub fn find<'s>(
&self,
subject: &'s [u8],
) -> Result<Option<Match<'s>>, Error> {
self.find_at(subject, 0)
}
/// Returns an iterator for each successive non-overlapping match in
/// `subject`, returning the start and end byte indices with respect to
/// `subject`.
///
/// # Example
///
/// Find the start and end location of every word with exactly 13 ASCII
/// word bytes:
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let text = b"Retroactively relinquishing remunerations is reprehensible.";
/// for result in Regex::new(r"\b\w{13}\b")?.find_iter(text) {
/// let mat = result?;
/// println!("{:?}", mat);
/// }
/// # Ok(()) }; example().unwrap()
/// ```
pub fn find_iter<'r, 's>(&'r self, subject: &'s [u8]) -> Matches<'r, 's> {
Matches {
re: self,
match_data: self.match_data(),
subject: subject,
last_end: 0,
last_match: None,
}
}
/// Returns the capture groups corresponding to the leftmost-first
/// match in `subject`. Capture group `0` always corresponds to the entire
/// match. If no match is found, then `None` is returned.
///
/// # Examples
///
/// Say you have some text with movie names and their release years,
/// like "'Citizen Kane' (1941)". It'd be nice if we could search for text
/// looking like that, while also extracting the movie name and its release
/// year separately.
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"'([^']+)'\s+\((\d{4})\)")?;
/// let text = b"Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text)?.unwrap();
/// assert_eq!(&caps[1], &b"Citizen Kane"[..]);
/// assert_eq!(&caps[2], &b"1941"[..]);
/// assert_eq!(&caps[0], &b"'Citizen Kane' (1941)"[..]);
/// // You can also access the groups by index using the Index notation.
/// // Note that this will panic on an invalid index.
/// assert_eq!(&caps[1], b"Citizen Kane");
/// assert_eq!(&caps[2], b"1941");
/// assert_eq!(&caps[0], b"'Citizen Kane' (1941)");
/// # Ok(()) }; example().unwrap()
/// ```
///
/// Note that the full match is at capture group `0`. Each subsequent
/// capture group is indexed by the order of its opening `(`.
///
/// We can make this example a bit clearer by using *named* capture groups:
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)")?;
/// let text = b"Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text)?.unwrap();
/// assert_eq!(&caps["title"], &b"Citizen Kane"[..]);
/// assert_eq!(&caps["year"], &b"1941"[..]);
/// assert_eq!(&caps[0], &b"'Citizen Kane' (1941)"[..]);
/// // You can also access the groups by name using the Index notation.
/// // Note that this will panic on an invalid group name.
/// assert_eq!(&caps["title"], b"Citizen Kane");
/// assert_eq!(&caps["year"], b"1941");
/// assert_eq!(&caps[0], b"'Citizen Kane' (1941)");
/// # Ok(()) }; example().unwrap()
/// ```
///
/// Here we name the capture groups, which we can access with the `name`
/// method or the `Index` notation with a `&str`. Note that the named
/// capture groups are still accessible with `get` or the `Index` notation
/// with a `usize`.
///
/// The `0`th capture group is always unnamed, so it must always be
/// accessed with `get(0)` or `[0]`.
pub fn captures<'s>(
&self,
subject: &'s [u8],
) -> Result<Option<Captures<'s>>, Error> {
let mut locs = self.capture_locations();
Ok(self.captures_read(&mut locs, subject)?.map(move |_| Captures {
subject: subject,
locs: locs,
idx: Arc::clone(&self.capture_names_idx),
}))
}
/// Returns an iterator over all the non-overlapping capture groups matched
/// in `subject`. This is operationally the same as `find_iter`, except it
/// yields information about capturing group matches.
///
/// # Example
///
/// We can use this to find all movie titles and their release years in
/// some text, where the movie is formatted like "'Title' (xxxx)":
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use std::str;
///
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)")?;
/// let text = b"'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
/// for result in re.captures_iter(text) {
/// let caps = result?;
/// let title = str::from_utf8(&caps["title"]).unwrap();
/// let year = str::from_utf8(&caps["year"]).unwrap();
/// println!("Movie: {:?}, Released: {:?}", title, year);
/// }
/// // Output:
/// // Movie: Citizen Kane, Released: 1941
/// // Movie: The Wizard of Oz, Released: 1939
/// // Movie: M, Released: 1931
/// # Ok(()) }; example().unwrap()
/// ```
pub fn captures_iter<'r, 's>(
&'r self,
subject: &'s [u8],
) -> CaptureMatches<'r, 's> {
CaptureMatches {
re: self,
subject: subject,
last_end: 0,
last_match: None,
}
}
}
/// Advanced or "lower level" search methods.
impl Regex {
/// Returns the same as is_match, but starts the search at the given
/// offset.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `start == 0`.
pub fn is_match_at(
&self,
subject: &[u8],
start: usize,
) -> Result<bool, Error> {
assert!(
start <= subject.len(),
"start ({}) must be <= subject.len() ({})",
start,
subject.len()
);
let mut options = 0;
if !self.config.utf_check {
options |= PCRE2_NO_UTF_CHECK;
}
let match_data = self.match_data();
let mut match_data = match_data.borrow_mut();
// SAFETY: The only unsafe PCRE2 option we potentially use here is
// PCRE2_NO_UTF_CHECK, and that only occurs if the caller executes the
// `disable_utf_check` method, which propagates the safety contract to
// the caller.
Ok(unsafe { match_data.find(&self.code, subject, start, options)? })
}
/// Returns the same as is_partial_match, but starts the search at the given
/// offset.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `start == 0`.
pub fn is_partial_match_at(
&self,
subject: &[u8],
start: usize,
) -> Result<bool, Error> {
assert!(
start <= subject.len(),
"start ({}) must be <= subject.len() ({})",
start,
subject.len()
);
let mut options = PCRE2_PARTIAL_HARD;
if !self.config.utf_check {
options |= PCRE2_NO_UTF_CHECK;
}
let match_data = self.match_data();
let mut match_data = match_data.borrow_mut();
// SAFETY: The only unsafe PCRE2 option we potentially use here is
// PCRE2_NO_UTF_CHECK, and that only occurs if the caller executes the
// `disable_utf_check` method, which propagates the safety contract to
// the caller.
Ok(unsafe { match_data.find(&self.code, subject, start, options)? })
}
/// Returns the same as find, but starts the search at the given
/// offset.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `start == 0`.
pub fn find_at<'s>(
&self,
subject: &'s [u8],
start: usize,
) -> Result<Option<Match<'s>>, Error> {
self.find_at_with_match_data(self.match_data(), subject, start)
}
/// Like find_at, but accepts match data instead of acquiring one itself.
///
/// This is useful for implementing the iterator, which permits avoiding
/// the synchronization overhead of acquiring the match data.
#[inline(always)]
fn find_at_with_match_data<'s>(
&self,
match_data: &RefCell<MatchData>,
subject: &'s [u8],
start: usize,
) -> Result<Option<Match<'s>>, Error> {
assert!(
start <= subject.len(),
"start ({}) must be <= subject.len() ({})",
start,
subject.len()
);
let mut options = 0;
if !self.config.utf_check {
options |= PCRE2_NO_UTF_CHECK;
}
let mut match_data = match_data.borrow_mut();
// SAFETY: The only unsafe PCRE2 option we potentially use here is
// PCRE2_NO_UTF_CHECK, and that only occurs if the caller executes the
// `disable_utf_check` method, which propagates the safety contract to
// the caller.
if unsafe { !match_data.find(&self.code, subject, start, options)? } {
return Ok(None);
}
let ovector = match_data.ovector();
let (s, e) = (ovector[0], ovector[1]);
Ok(Some(Match::new(&subject[s..e], s, e)))
}
/// This is like `captures`, but uses
/// [`CaptureLocations`](struct.CaptureLocations.html)
/// instead of
/// [`Captures`](struct.Captures.html) in order to amortize allocations.
///
/// To create a `CaptureLocations` value, use the
/// `Regex::capture_locations` method.
///
/// This returns the overall match if this was successful, which is always
/// equivalent to the `0`th capture group.
pub fn captures_read<'s>(
&self,
locs: &mut CaptureLocations,
subject: &'s [u8],
) -> Result<Option<Match<'s>>, Error> {
self.captures_read_at(locs, subject, 0)
}
/// Returns the same as `captures_read`, but starts the search at the given
/// offset and populates the capture locations given.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `start == 0`.
pub fn captures_read_at<'s>(
&self,
locs: &mut CaptureLocations,
subject: &'s [u8],
start: usize,
) -> Result<Option<Match<'s>>, Error> {
assert!(
start <= subject.len(),
"start ({}) must be <= subject.len() ({})",
start,
subject.len()
);
let mut options = 0;
if !self.config.utf_check {
options |= PCRE2_NO_UTF_CHECK;
}
// SAFETY: The only unsafe PCRE2 option we potentially use here is
// PCRE2_NO_UTF_CHECK, and that only occurs if the caller executes the
// `disable_utf_check` method, which propagates the safety contract to
// the caller.
if unsafe { !locs.data.find(&self.code, subject, start, options)? } {
return Ok(None);
}
let ovector = locs.data.ovector();
let (s, e) = (ovector[0], ovector[1]);
Ok(Some(Match::new(&subject[s..e], s, e)))
}
}
/// Auxiliary methods.
impl Regex {
/// Returns the original pattern string for this regex.
pub fn as_str(&self) -> &str {
&self.pattern
}
/// Returns a sequence of all capturing groups and their names, if present.
///
/// The length of the slice returned is always equal to the result of
/// `captures_len`, which is the number of capturing groups (including the
/// capturing group for the entire pattern).
///
/// Each entry in the slice is the name of the corresponding capturing
/// group, if one exists. The first capturing group (at index `0`) is
/// always unnamed.
///
/// Capturing groups are indexed by the order of the opening parenthesis.
pub fn capture_names(&self) -> &[Option<String>] {
&self.capture_names
}
/// Returns the number of capturing groups in the pattern.
///
/// This is always 1 more than the number of syntactic groups in the
/// pattern, since the first group always corresponds to the entire match.
pub fn captures_len(&self) -> usize {
self.code.capture_count().expect("a valid capture count from PCRE2")
}
/// Returns an empty set of capture locations that can be reused in
/// multiple calls to `captures_read` or `captures_read_at`.
pub fn capture_locations(&self) -> CaptureLocations {
CaptureLocations {
code: Arc::clone(&self.code),
data: self.new_match_data(),
}
}
fn match_data(&self) -> &RefCell<MatchData> {
let create = || RefCell::new(self.new_match_data());
self.match_data.get_or(create)
}
fn new_match_data(&self) -> MatchData {
MatchData::new(self.config.match_config.clone(), &self.code)
}
}
/// CaptureLocations is a low level representation of the raw offsets of each
/// submatch.
///
/// Primarily, this type is useful when using `Regex` APIs such as
/// `captures_read`, which permits amortizing the allocation in which capture
/// match locations are stored.
///
/// In order to build a value of this type, you'll need to call the
/// `capture_locations` method on the `Regex` being used to execute the search.
/// The value returned can then be reused in subsequent searches.
pub struct CaptureLocations {
code: Arc<Code>,
data: MatchData,
}
impl Clone for CaptureLocations {
fn clone(&self) -> CaptureLocations {
CaptureLocations {
code: Arc::clone(&self.code),
data: MatchData::new(self.data.config().clone(), &self.code),
}
}
}
impl fmt::Debug for CaptureLocations {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut offsets: Vec<Option<usize>> = vec![];
for &offset in self.data.ovector() {
if offset == PCRE2_UNSET {
offsets.push(None);
} else {
offsets.push(Some(offset));
}
}
write!(f, "CaptureLocations(")?;
f.debug_list().entries(offsets).finish()?;
write!(f, ")")
}
}
impl CaptureLocations {
/// Returns the start and end positions of the Nth capture group.
///
/// This returns `None` if `i` is not a valid capture group or if the
/// capture group did not match anything.
///
/// The positions returned are always byte indices with respect to the
/// original subject string matched.
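    ///
    /// A short sketch:
    ///
    /// ```rust
    /// # fn example() -> Result<(), ::pcre2::Error> {
    /// use pcre2::bytes::Regex;
    ///
    /// let re = Regex::new(r"([a-z]+)([0-9]+)")?;
    /// let mut locs = re.capture_locations();
    /// assert!(re.captures_read_at(&mut locs, b"abc123", 0)?.is_some());
    /// assert_eq!(locs.get(2), Some((3, 6)));
    /// # Ok(()) }; example().unwrap()
    /// ```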
#[inline]
pub fn get(&self, i: usize) -> Option<(usize, usize)> {
let ovec = self.data.ovector();
let s = match ovec.get(i * 2) {
None => return None,
Some(&s) if s == PCRE2_UNSET => return None,
Some(&s) => s,
};
let e = match ovec.get(i * 2 + 1) {
None => return None,
Some(&e) if e == PCRE2_UNSET => return None,
Some(&e) => e,
};
Some((s, e))
}
/// Returns the total number of capturing groups.
///
/// This is always at least `1` since every regex has at least `1`
/// capturing group that corresponds to the entire match.
#[inline]
pub fn len(&self) -> usize {
self.data.ovector().len() / 2
}
}
/// Captures represents a group of captured byte strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex. If a capture
/// group is named, then the matched byte string is *also* available via the
/// `name` method. (Note that the 0th capture is always unnamed and so must be
/// accessed with the `get` method.)
///
/// Positions returned from a capture group are always byte indices.
///
/// `'s` is the lifetime of the matched subject string.
pub struct Captures<'s> {
subject: &'s [u8],
locs: CaptureLocations,
idx: Arc<HashMap<String, usize>>,
}
impl<'s> Captures<'s> {
/// Returns the match associated with the capture group at index `i`. If
/// `i` does not correspond to a capture group, or if the capture group
/// did not participate in the match, then `None` is returned.
///
/// # Examples
///
/// Get the text of the match with a default of an empty string if this
/// group didn't participate in the match:
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))")?;
/// let caps = re.captures(b"abc123")?.unwrap();
///
/// let text1 = caps.get(1).map_or(&b""[..], |m| m.as_bytes());
/// let text2 = caps.get(2).map_or(&b""[..], |m| m.as_bytes());
/// assert_eq!(text1, &b"123"[..]);
/// assert_eq!(text2, &b""[..]);
/// # Ok(()) }; example().unwrap()
/// ```
pub fn get(&self, i: usize) -> Option<Match<'s>> {
self.locs.get(i).map(|(s, e)| Match::new(self.subject, s, e))
}
/// Returns the match for the capture group named `name`. If `name` isn't a
/// valid capture group or didn't match anything, then `None` is returned.
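    ///
    /// A short example (the group name is illustrative):
    ///
    /// ```rust
    /// # fn example() -> Result<(), ::pcre2::Error> {
    /// use pcre2::bytes::Regex;
    ///
    /// let re = Regex::new(r"(?P<digits>[0-9]+)")?;
    /// let caps = re.captures(b"abc123")?.unwrap();
    /// assert_eq!(caps.name("digits").unwrap().as_bytes(), &b"123"[..]);
    /// assert!(caps.name("words").is_none());
    /// # Ok(()) }; example().unwrap()
    /// ```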
pub fn name(&self, name: &str) -> Option<Match<'s>> {
self.idx.get(name).and_then(|&i| self.get(i))
}
/// Returns the number of captured groups.
///
/// This is always at least `1`, since every regex has at least one capture
/// group that corresponds to the full match.
#[inline]
pub fn len(&self) -> usize {
self.locs.len()
}
}
impl<'s> fmt::Debug for Captures<'s> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Captures").field(&CapturesDebug(self)).finish()
}
}
struct CapturesDebug<'c, 's: 'c>(&'c Captures<'s>);
impl<'c, 's> fmt::Debug for CapturesDebug<'c, 's> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn escape_bytes(bytes: &[u8]) -> String {
let mut s = String::new();
for &b in bytes {
s.push_str(&escape_byte(b));
}
s
}
fn escape_byte(byte: u8) -> String {
use std::ascii::escape_default;
let escaped: Vec<u8> = escape_default(byte).collect();
String::from_utf8_lossy(&escaped).into_owned()
}
// We'd like to show something nice here, even if it means an
// allocation to build a reverse index.
let slot_to_name: HashMap<&usize, &String> =
self.0.idx.iter().map(|(a, b)| (b, a)).collect();
let mut map = f.debug_map();
for slot in 0..self.0.len() {
let m = self.0.locs.get(slot).map(|(s, e)| {
escape_bytes(&self.0.subject[s..e])
});
if let Some(name) = slot_to_name.get(&slot) {
map.entry(&name, &m);
} else {
map.entry(&slot, &m);
}
}
map.finish()
}
}
/// Get a group by index.
///
/// `'s` is the lifetime of the matched subject string.
///
/// The subject can't outlive the `Captures` object if this method is
/// used, because of how `Index` is defined (normally `a[i]` is part
/// of `a` and can't outlive it); to do that, use `get()` instead.
///
/// # Panics
///
/// If there is no group at the given index.
impl<'s> Index<usize> for Captures<'s> {
type Output = [u8];
fn index(&self, i: usize) -> &[u8] {
self.get(i).map(|m| m.as_bytes())
.unwrap_or_else(|| panic!("no group at index '{}'", i))
}
}
/// Get a group by name.
///
/// `'s` is the lifetime of the matched subject string and `'i` is the lifetime
/// of the group name (the index).
///
/// The text can't outlive the `Captures` object if this method is
/// used, because of how `Index` is defined (normally `a[i]` is part
/// of `a` and can't outlive it); to do that, use `name` instead.
///
/// # Panics
///
/// If there is no group named by the given value.
impl<'s, 'i> Index<&'i str> for Captures<'s> {
type Output = [u8];
fn index<'a>(&'a self, name: &'i str) -> &'a [u8] {
self.name(name).map(|m| m.as_bytes())
.unwrap_or_else(|| panic!("no group named '{}'", name))
}
}
/// An iterator over all non-overlapping matches for a particular subject
/// string.
///
/// The iterator yields matches (if no error occurred while searching)
/// corresponding to the start and end of the match. The indices are byte
/// offsets. The iterator stops when no more matches can be found.
///
/// `'r` is the lifetime of the compiled regular expression and `'s` is the
/// lifetime of the subject string.
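///
/// A short sketch (`find_iter` on a `Regex` constructs this iterator):
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"[0-9]+")?;
/// let mut pairs = vec![];
/// for result in re.find_iter(b"a1b22c333") {
///     let m = result?;
///     pairs.push((m.start(), m.end()));
/// }
/// assert_eq!(pairs, vec![(1, 2), (3, 5), (6, 9)]);
/// # Ok(()) }; example().unwrap()
/// ```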
pub struct Matches<'r, 's> {
re: &'r Regex,
match_data: &'r RefCell<MatchData>,
subject: &'s [u8],
last_end: usize,
last_match: Option<usize>,
}
impl<'r, 's> Iterator for Matches<'r, 's> {
type Item = Result<Match<'s>, Error>;
fn next(&mut self) -> Option<Result<Match<'s>, Error>> {
if self.last_end > self.subject.len() {
return None;
}
let res = self.re.find_at_with_match_data(
self.match_data,
self.subject,
self.last_end,
);
let m = match res {
Err(err) => return Some(Err(err)),
Ok(None) => return None,
Ok(Some(m)) => m,
};
if m.start() == m.end() {
// This is an empty match. To ensure we make progress, start
// the next search at the smallest possible starting position
// of the next match following this one.
self.last_end = m.end() + 1;
// Don't accept empty matches immediately following a match.
// Just move on to the next match.
if Some(m.end()) == self.last_match {
return self.next();
}
} else {
self.last_end = m.end();
}
self.last_match = Some(m.end());
Some(Ok(m))
}
}
/// An iterator that yields all non-overlapping capture groups matching a
/// particular regular expression.
///
/// The iterator stops when no more matches can be found.
///
/// `'r` is the lifetime of the compiled regular expression and `'s` is the
/// lifetime of the subject string.
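///
/// A short sketch (`captures_iter` on a `Regex` constructs this iterator):
///
/// ```rust
/// # fn example() -> Result<(), ::pcre2::Error> {
/// use pcre2::bytes::Regex;
///
/// let re = Regex::new(r"([a-z])([0-9])")?;
/// for result in re.captures_iter(b"a1 b2") {
///     let caps = result?;
///     assert_eq!(caps.len(), 3);
/// }
/// # Ok(()) }; example().unwrap()
/// ```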
pub struct CaptureMatches<'r, 's> {
re: &'r Regex,
subject: &'s [u8],
last_end: usize,
last_match: Option<usize>,
}
impl<'r, 's> Iterator for CaptureMatches<'r, 's> {
type Item = Result<Captures<'s>, Error>;
fn next(&mut self) -> Option<Result<Captures<'s>, Error>> {
if self.last_end > self.subject.len() {
return None;
}
let mut locs = self.re.capture_locations();
let res = self.re.captures_read_at(
&mut locs,
self.subject,
self.last_end,
);
let m = match res {
Err(err) => return Some(Err(err)),
Ok(None) => return None,
Ok(Some(m)) => m,
};
if m.start() == m.end() {
// This is an empty match. To ensure we make progress, start
// the next search at the smallest possible starting position
// of the next match following this one.
self.last_end = m.end() + 1;
// Don't accept empty matches immediately following a match.
// Just move on to the next match.
if Some(m.end()) == self.last_match {
return self.next();
}
} else {
self.last_end = m.end();
}
self.last_match = Some(m.end());
Some(Ok(Captures {
subject: self.subject,
            locs,
idx: Arc::clone(&self.re.capture_names_idx),
}))
}
}
#[cfg(test)]
mod tests {
use super::{Regex, RegexBuilder};
use crate::is_jit_available;
fn b(string: &str) -> &[u8] {
string.as_bytes()
}
fn find_iter_tuples(re: &Regex, subject: &[u8]) -> Vec<(usize, usize)> {
let mut tuples = vec![];
for result in re.find_iter(subject) {
let m = result.unwrap();
tuples.push((m.start(), m.end()));
}
tuples
}
fn cap_iter_tuples(re: &Regex, subject: &[u8]) -> Vec<(usize, usize)> {
let mut tuples = vec![];
for result in re.captures_iter(subject) {
let caps = result.unwrap();
let m = caps.get(0).unwrap();
tuples.push((m.start(), m.end()));
}
tuples
}
#[test]
fn caseless() {
let re = RegexBuilder::new()
.caseless(true)
.build("a")
.unwrap();
assert!(re.is_match(b("A")).unwrap());
let re = RegexBuilder::new()
.caseless(true)
.ucp(true)
.build("β")
.unwrap();
assert!(re.is_match(b("Β")).unwrap());
}
#[test]
fn partial() {
let re = RegexBuilder::new()
.build("ab$")
.unwrap();
assert!(re.is_partial_match(b("a")).unwrap());
assert!(re.is_partial_match(b("ab")).unwrap());
assert!(!re.is_partial_match(b("abc")).unwrap());
assert!(!re.is_partial_match(b("b")).unwrap());
}
#[test]
fn crlf() {
let re = RegexBuilder::new()
.crlf(true)
.build("a$")
.unwrap();
let m = re.find(b("a\r\n")).unwrap().unwrap();
assert_eq!(m.as_pair(), (0, 1));
}
#[test]
fn dotall() {
let re = RegexBuilder::new()
.dotall(false)
.build(".")
.unwrap();
assert!(!re.is_match(b("\n")).unwrap());
let re = RegexBuilder::new()
.dotall(true)
.build(".")
.unwrap();
assert!(re.is_match(b("\n")).unwrap());
}
#[test]
fn extended() {
let re = RegexBuilder::new()
.extended(true)
.build("a b c")
.unwrap();
assert!(re.is_match(b("abc")).unwrap());
}
#[test]
fn multi_line() {
let re = RegexBuilder::new()
.multi_line(false)
.build("^abc$")
.unwrap();
assert!(!re.is_match(b("foo\nabc\nbar")).unwrap());
let re = RegexBuilder::new()
.multi_line(true)
.build("^abc$")
.unwrap();
assert!(re.is_match(b("foo\nabc\nbar")).unwrap());
}
#[test]
fn ucp() {
let re = RegexBuilder::new()
.ucp(false)
.build(r"\w")
.unwrap();
assert!(!re.is_match(b("β")).unwrap());
let re = RegexBuilder::new()
.ucp(true)
.build(r"\w")
.unwrap();
assert!(re.is_match(b("β")).unwrap());
}
#[test]
fn utf() {
let re = RegexBuilder::new()
.utf(false)
.build(".")
.unwrap();
assert_eq!(re.find(b("β")).unwrap().unwrap().as_pair(), (0, 1));
let re = RegexBuilder::new()
.utf(true)
.build(".")
.unwrap();
assert_eq!(re.find(b("β")).unwrap().unwrap().as_pair(), (0, 2));
}
#[test]
fn jit4lyfe() {
if is_jit_available() {
let re = RegexBuilder::new()
.jit(true)
.build(r"\w")
.unwrap();
assert!(re.is_match(b("a")).unwrap());
} else {
// Check that if JIT isn't enabled, then we get an error if we
// require JIT.
RegexBuilder::new()
.jit(true)
.build(r"\w")
.unwrap_err();
}
}
// Unlike jit4lyfe, this tests that everything works when requesting the
// JIT only if it's available. In jit4lyfe, we require the JIT or fail.
// If the JIT isn't available, then in this test, we simply don't use it.
#[test]
fn jit_if_available() {
let re = RegexBuilder::new()
.jit_if_available(true)
.build(r"\w")
.unwrap();
assert!(re.is_match(b("a")).unwrap());
}
    // This tests a regression that caused a segfault in the pcre2 library:
// https://github.com/BurntSushi/rust-pcre2/issues/10
#[test]
fn jit_test_lazy_alloc_subject() {
let subject: Vec<u8> = vec![];
let re = RegexBuilder::new()
.jit_if_available(true)
.build(r"xxxx|xxxx|xxxx")
.unwrap();
assert!(!re.is_match(&subject).unwrap());
}
#[test]
fn utf_with_invalid_data() {
let re = RegexBuilder::new()
.build(r".")
.unwrap();
assert_eq!(re.find(b"\xFF").unwrap().unwrap().as_pair(), (0, 1));
let re = RegexBuilder::new()
.utf(true)
.build(r".")
.unwrap();
assert!(re.find(b"\xFF").is_err());
}
#[test]
fn capture_names() {
let re = RegexBuilder::new()
.build(
r"(?P<foo>abc)|(def)|(?P<a>ghi)|(?P<springsteen>jkl)"
)
.unwrap();
assert_eq!(re.capture_names().to_vec(), vec![
None,
Some("foo".to_string()),
None,
Some("a".to_string()),
Some("springsteen".to_string()),
]);
// Test our internal map as well.
assert_eq!(re.capture_names_idx.len(), 3);
assert_eq!(re.capture_names_idx["foo"], 1);
assert_eq!(re.capture_names_idx["a"], 3);
assert_eq!(re.capture_names_idx["springsteen"], 4);
}
#[test]
fn captures_get() {
let re = Regex::new(r"[a-z]+(?:([0-9]+)|([A-Z]+))").unwrap();
let caps = re.captures(b"abc123").unwrap().unwrap();
let text1 = caps.get(1).map_or(&b""[..], |m| m.as_bytes());
let text2 = caps.get(2).map_or(&b""[..], |m| m.as_bytes());
assert_eq!(text1, &b"123"[..]);
assert_eq!(text2, &b""[..]);
}
#[test]
fn find_iter_empty() {
let re = Regex::new(r"(?m:^)").unwrap();
assert_eq!(find_iter_tuples(&re, b""), vec![(0, 0)]);
assert_eq!(find_iter_tuples(&re, b"\n"), vec![(0, 0)]);
assert_eq!(find_iter_tuples(&re, b"\n\n"), vec![(0, 0), (1, 1)]);
assert_eq!(find_iter_tuples(&re, b"\na\n"), vec![(0, 0), (1, 1)]);
assert_eq!(find_iter_tuples(&re, b"\na\n\n"), vec![
(0, 0), (1, 1), (3, 3),
]);
}
#[test]
fn captures_iter_empty() {
let re = Regex::new(r"(?m:^)").unwrap();
assert_eq!(cap_iter_tuples(&re, b""), vec![(0, 0)]);
assert_eq!(cap_iter_tuples(&re, b"\n"), vec![(0, 0)]);
assert_eq!(cap_iter_tuples(&re, b"\n\n"), vec![(0, 0), (1, 1)]);
assert_eq!(cap_iter_tuples(&re, b"\na\n"), vec![(0, 0), (1, 1)]);
assert_eq!(cap_iter_tuples(&re, b"\na\n\n"), vec![
(0, 0), (1, 1), (3, 3),
]);
}
#[test]
fn max_jit_stack_size_does_something() {
if !is_jit_available() {
return;
}
let hundred = "\
ABCDEFGHIJKLMNOPQRSTUVWXY\
ABCDEFGHIJKLMNOPQRSTUVWXY\
ABCDEFGHIJKLMNOPQRSTUVWXY\
ABCDEFGHIJKLMNOPQRSTUVWXY\
";
        let hay = hundred.repeat(100);
// First, try a regex that checks that we can blow the JIT stack limit.
let re = RegexBuilder::new()
.ucp(true)
.jit(true)
.max_jit_stack_size(Some(1))
.build(r"((((\w{10})){100}))+")
.unwrap();
let result = re.is_match(hay.as_bytes());
if result.is_ok() {
// Skip this test, since for some reason we weren't able to blow
// the stack limit.
return;
}
let err = result.unwrap_err();
assert!(err.to_string().contains("JIT stack limit reached"));
// Now bump up the JIT stack limit and check that it succeeds.
let re = RegexBuilder::new()
.ucp(true)
.jit(true)
.max_jit_stack_size(Some(1<<20))
.build(r"((((\w{10})){100}))+")
.unwrap();
assert!(re.is_match(hay.as_bytes()).unwrap());
}
}
| 33.373103 | 98 | 0.55841 |
0ac2af4f3ca036a132d7bb8f242f94895a665ccf | 3,544 | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate pnet;
use pnet::datalink;
use pnet::packet::ethernet::{EtherTypes, MutableEthernetPacket};
use pnet::packet::ip::IpNextHeaderProtocols;
use pnet::packet::ipv4::{self, MutableIpv4Packet};
use pnet::packet::udp::{self, MutableUdpPacket};
use pnet::packet::MutablePacket;
use std::env;
use std::net::Ipv4Addr;
static IPV4_HEADER_LEN: usize = 20;
static UDP_HEADER_LEN: usize = 8;
static TEST_DATA_LEN: usize = 5;
pub fn build_ipv4_header(packet: &mut [u8]) -> MutableIpv4Packet {
let mut ip_header = MutableIpv4Packet::new(packet).unwrap();
let total_len = (IPV4_HEADER_LEN + UDP_HEADER_LEN + TEST_DATA_LEN) as u16;
ip_header.set_version(4);
ip_header.set_header_length(5);
ip_header.set_total_length(total_len);
ip_header.set_ttl(4);
ip_header.set_next_level_protocol(IpNextHeaderProtocols::Udp);
ip_header.set_source(Ipv4Addr::new(127, 0, 0, 1));
ip_header.set_destination(Ipv4Addr::new(127, 0, 0, 1));
let checksum = ipv4::checksum(&ip_header.to_immutable());
ip_header.set_checksum(checksum);
ip_header
}
pub fn build_udp_header(packet: &mut [u8]) -> MutableUdpPacket {
let mut udp_header = MutableUdpPacket::new(packet).unwrap();
    udp_header.set_source(1234); // Arbitrary port number
udp_header.set_destination(1234);
udp_header.set_length((UDP_HEADER_LEN + TEST_DATA_LEN) as u16);
udp_header
}
pub fn build_udp4_packet(packet: &mut [u8], msg: &str) {
let mut ip_header = build_ipv4_header(packet);
let source = ip_header.get_source();
let destination = ip_header.get_destination();
let mut udp_header = build_udp_header(ip_header.payload_mut());
{
        let data = udp_header.payload_mut();
        data[..TEST_DATA_LEN].copy_from_slice(&msg.as_bytes()[..TEST_DATA_LEN]);
}
let checksum = udp::ipv4_checksum(&udp_header.to_immutable(), &source, &destination);
udp_header.set_checksum(checksum);
}
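// Expected invocation (derived from the argument handling below): the first
// argument is the interface to send on, the second is the destination MAC
// address, e.g.
//
//     rs_sender <network interface> <destination MAC address>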
fn main() {
use pnet::datalink::Channel::Ethernet;
let interface_name = env::args().nth(1).unwrap();
    let destination = env::args().nth(2).unwrap().parse().unwrap();
// Find the network interface with the provided name
let interfaces = datalink::interfaces();
let interface = interfaces
.iter()
        .find(|iface| iface.name == interface_name)
.unwrap();
// Create a channel to send on
let mut tx = match datalink::channel(interface, Default::default()) {
Ok(Ethernet(tx, _)) => tx,
Ok(_) => panic!("rs_sender: unhandled channel type"),
Err(e) => panic!("rs_sender: unable to create channel: {}", e),
};
let mut buffer = [0u8; 64];
{
let mut mut_ethernet_header = MutableEthernetPacket::new(&mut buffer[..]).unwrap();
mut_ethernet_header.set_destination(destination);
mut_ethernet_header.set_source(interface.mac_address());
mut_ethernet_header.set_ethertype(EtherTypes::Ipv4);
build_udp4_packet(mut_ethernet_header.payload_mut(), "rmesg");
}
loop {
tx.send_to(&buffer, None);
}
}
| 33.121495 | 91 | 0.676072 |
de1fdde4fa5bbc941d6330e8f278f2cf9816343b | 11,700 | //! This module contains the current mess that is error handling.
use crate::protocol::xproto::{SetupAuthenticate, SetupFailed};
use crate::x11_utils::X11Error;
/// An error occurred while dynamically loading libxcb.
#[cfg(feature = "dl-libxcb")]
#[derive(Debug, Clone)]
pub enum LibxcbLoadError {
/// Could not open the library. The `OsString` is the library
/// file name and the string is the reason.
OpenLibError(std::ffi::OsString, String),
/// Could not get a symbol from the library. The byte vector is the
/// symbol name and the string is the reason.
GetSymbolError(Vec<u8>, String),
}
#[cfg(feature = "dl-libxcb")]
impl std::fmt::Display for LibxcbLoadError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
LibxcbLoadError::OpenLibError(lib_name, e) => {
write!(f, "failed to open library {:?}: {}", lib_name, e)
}
LibxcbLoadError::GetSymbolError(symbol, e) => write!(
f,
"failed to get symbol \"{}\": {}",
symbol
.iter()
.flat_map(|&c| std::ascii::escape_default(c))
.map(char::from)
.collect::<String>(),
e,
),
}
}
}
#[cfg(feature = "dl-libxcb")]
impl std::error::Error for LibxcbLoadError {}
/// An error occurred while parsing some data
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ParseError {
/// Not enough data was provided.
InsufficientData,
/// A value did not fit.
///
/// This error can e.g. happen when a value that was received from the X11 server does not fit
    /// into a `usize`.
ConversionFailed,
/// The value of an expression could not be computed.
///
/// As an example, the length of the data in `xproto`'s `GetPropertyReply` is described by
/// `value_len * (format / 8)`. The multiplication could cause an overflow, which would be
/// represented by this error.
InvalidExpression,
/// A value was outside of its valid range.
///
/// There are two kinds of situations where this error can happen:
///
/// 1. The protocol was violated and a nonsensical value was found.
/// 2. The user of the API called the wrong parsing function.
///
/// Examples for the first kind of error:
///
/// - One of a set of values should be present (a `<switch>` in xcb-proto-speak), but none of
/// the `<cases>` matched. This can e.g. happen when parsing
/// [`crate::protocol::xinput::InputInfo`].
/// - Parsing a request with a length field that is too small for the request header to fit.
///
/// Examples for the second kind of error:
///
/// - Parsing an X11 error with `response_type != 0`.
/// - Parsing an X11 reply with `response_type != 1`.
/// - Parsing an X11 request with the wrong value for its `minor_opcode`.
InvalidValue,
/// Some file descriptors were expected, but not enough were received.
MissingFileDescriptors,
}
impl std::error::Error for ParseError {}
impl std::fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ParseError::InsufficientData => write!(f, "Insufficient data was provided"),
ParseError::ConversionFailed => {
write!(f, "A value conversion failed due to out of range data")
}
ParseError::InvalidExpression => write!(
f,
"An expression could not be computed, e.g. due to overflow"
),
ParseError::InvalidValue => {
write!(f, "A value could not be parsed into an enumeration")
}
ParseError::MissingFileDescriptors => write!(f, "Missing file descriptors"),
}
}
}
/// An error that occurred while connecting to an X11 server
#[derive(Debug)]
#[non_exhaustive]
pub enum ConnectError {
/// An unknown error occurred.
///
    /// One situation where this error is used is when libxcb indicates an error that does not
    /// match any of the defined error conditions. Thus, libxcb is violating its own API (or new
    /// error cases were defined, but are not yet handled by x11rb).
UnknownError,
/// Error while parsing some data, see `ParseError`.
ParseError(ParseError),
/// Out of memory.
///
/// This is `XCB_CONN_CLOSED_MEM_INSUFFICIENT`.
InsufficientMemory,
/// Error during parsing of display string.
///
/// This is `XCB_CONN_CLOSED_PARSE_ERR`.
DisplayParsingError,
/// Server does not have a screen matching the display.
///
/// This is `XCB_CONN_CLOSED_INVALID_SCREEN`.
InvalidScreen,
/// An I/O error occurred on the connection.
IoError(std::io::Error),
/// Invalid ID mask provided by the server.
///
/// The value of `resource_id_mask` in the `Setup` provided by the server was zero.
ZeroIdMask,
/// The server rejected the connection with a `SetupAuthenticate` message.
SetupAuthenticate(SetupAuthenticate),
/// The server rejected the connection with a `SetupFailed` message.
SetupFailed(SetupFailed),
}
impl std::error::Error for ConnectError {}
impl std::fmt::Display for ConnectError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fn display(
f: &mut std::fmt::Formatter<'_>,
prefix: &str,
value: &[u8],
) -> std::fmt::Result {
match std::str::from_utf8(value).ok() {
Some(value) => write!(f, "{}: '{}'", prefix, value),
None => write!(f, "{}: {:?} [message is not utf8]", prefix, value),
}
}
match self {
ConnectError::UnknownError => write!(f, "Unknown connection error"),
ConnectError::InsufficientMemory => write!(f, "Insufficient memory"),
ConnectError::DisplayParsingError => write!(f, "Display parsing error"),
ConnectError::InvalidScreen => write!(f, "Invalid screen"),
ConnectError::ParseError(err) => err.fmt(f),
ConnectError::IoError(err) => err.fmt(f),
ConnectError::ZeroIdMask => write!(f, "XID mask was zero"),
ConnectError::SetupFailed(err) => display(f, "X11 setup failed", &err.reason),
ConnectError::SetupAuthenticate(err) => {
display(f, "X11 authentication failed", &err.reason)
}
}
}
}
impl From<ParseError> for ConnectError {
fn from(err: ParseError) -> Self {
ConnectError::ParseError(err)
}
}
impl From<std::io::Error> for ConnectError {
fn from(err: std::io::Error) -> Self {
ConnectError::IoError(err)
}
}
/// An error that occurred on an already established X11 connection
#[derive(Debug)]
#[non_exhaustive]
pub enum ConnectionError {
/// An unknown error occurred.
///
    /// One situation where this error is used is when libxcb indicates an error that does not
    /// match any of the defined error conditions. Thus, libxcb is violating its own API (or new
    /// error cases were defined, but are not yet handled by x11rb).
UnknownError,
/// An X11 extension was not supported by the server.
///
/// This corresponds to `XCB_CONN_CLOSED_EXT_NOTSUPPORTED`.
UnsupportedExtension,
/// A request larger than the maximum request length was sent.
///
/// This corresponds to `XCB_CONN_CLOSED_REQ_LEN_EXCEED`.
MaximumRequestLengthExceeded,
/// File descriptor passing failed.
///
/// This corresponds to `XCB_CONN_CLOSED_FDPASSING_FAILED`.
FdPassingFailed,
/// Error while parsing some data, see `ParseError`.
ParseError(ParseError),
/// Out of memory.
///
/// This is `XCB_CONN_CLOSED_MEM_INSUFFICIENT`.
InsufficientMemory,
/// An I/O error occurred on the connection.
IoError(std::io::Error),
}
impl std::error::Error for ConnectionError {}
impl std::fmt::Display for ConnectionError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ConnectionError::UnknownError => write!(f, "Unknown connection error"),
ConnectionError::UnsupportedExtension => write!(f, "Unsupported extension"),
ConnectionError::InsufficientMemory => write!(f, "Insufficient memory"),
ConnectionError::MaximumRequestLengthExceeded => {
write!(f, "Maximum request length exceeded")
}
ConnectionError::FdPassingFailed => write!(f, "FD passing failed"),
ConnectionError::ParseError(err) => err.fmt(f),
ConnectionError::IoError(err) => err.fmt(f),
}
}
}
impl From<ParseError> for ConnectionError {
fn from(err: ParseError) -> Self {
ConnectionError::ParseError(err)
}
}
impl From<std::io::Error> for ConnectionError {
fn from(err: std::io::Error) -> Self {
ConnectionError::IoError(err)
}
}
/// An error that occurred with some request.
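///
/// Because both `ConnectionError` and `X11Error` convert into this type, a
/// fallible helper can use `?` on either kind of failure. A sketch (the
/// request is illustrative; `conn` is any `Connection`):
///
/// ```ignore
/// fn window_size(conn: &impl Connection, window: Window) -> Result<(u16, u16), ReplyError> {
///     let geometry = conn.get_geometry(window)?.reply()?;
///     Ok((geometry.width, geometry.height))
/// }
/// ```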
#[derive(Debug)]
pub enum ReplyError {
/// Some error occurred on the X11 connection.
ConnectionError(ConnectionError),
/// The X11 server sent an error in response to a request.
X11Error(X11Error),
}
impl std::error::Error for ReplyError {}
impl std::fmt::Display for ReplyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ReplyError::ConnectionError(e) => write!(f, "{}", e),
ReplyError::X11Error(e) => write!(f, "X11 error {:?}", e),
}
}
}
impl From<ParseError> for ReplyError {
fn from(err: ParseError) -> Self {
Self::from(ConnectionError::from(err))
}
}
impl From<std::io::Error> for ReplyError {
fn from(err: std::io::Error) -> Self {
ConnectionError::from(err).into()
}
}
impl From<ConnectionError> for ReplyError {
fn from(err: ConnectionError) -> Self {
Self::ConnectionError(err)
}
}
impl From<X11Error> for ReplyError {
fn from(err: X11Error) -> Self {
Self::X11Error(err)
}
}
/// An error caused by some request or by the exhaustion of IDs.
#[derive(Debug)]
pub enum ReplyOrIdError {
/// All available IDs have been exhausted.
IdsExhausted,
/// Some error occurred on the X11 connection.
ConnectionError(ConnectionError),
/// The X11 server sent an error in response to a request.
X11Error(X11Error),
}
impl std::fmt::Display for ReplyOrIdError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ReplyOrIdError::IdsExhausted => f.write_str("X11 IDs have been exhausted"),
ReplyOrIdError::ConnectionError(e) => write!(f, "{}", e),
ReplyOrIdError::X11Error(e) => write!(f, "X11 error {:?}", e),
}
}
}
impl std::error::Error for ReplyOrIdError {}
impl From<ParseError> for ReplyOrIdError {
fn from(err: ParseError) -> Self {
ConnectionError::from(err).into()
}
}
impl From<ConnectionError> for ReplyOrIdError {
fn from(err: ConnectionError) -> Self {
ReplyOrIdError::ConnectionError(err)
}
}
impl From<X11Error> for ReplyOrIdError {
fn from(err: X11Error) -> Self {
ReplyOrIdError::X11Error(err)
}
}
impl From<ReplyError> for ReplyOrIdError {
fn from(err: ReplyError) -> Self {
match err {
ReplyError::ConnectionError(err) => ReplyOrIdError::ConnectionError(err),
ReplyError::X11Error(err) => ReplyOrIdError::X11Error(err),
}
}
}
| 32.957746 | 98 | 0.61812 |
39ea93a410ae2efdbed031fee15a415f387fe9e5 | 926 | //! A simple slab allocator for pages in wasm
#![feature(stdsimd)]
#![cfg(target_arch = "wasm32")]
extern crate stdsimd;
use std::ptr;
use stdsimd::arch::wasm32::*;
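// Free pages form an intrusive singly linked list: the first word of every
// free page holds a pointer to the next free page (or null). `HEAD` points
// at the most recently freed page.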
static mut HEAD: *mut *mut u8 = 0 as _;
#[no_mangle]
pub unsafe extern "C" fn page_alloc() -> *mut u8 {
    // Pop a page off the free list if one is available.
    if !HEAD.is_null() {
let next = *HEAD;
let ret = HEAD;
HEAD = next as *mut _;
return ret as *mut u8;
}
    // Otherwise grow linear memory by one page. `memory::grow` returns the
    // previous size in pages, or -1 if the allocation failed.
    let ret = memory::grow(0, 1);
    if ret == -1 {
        return ptr::null_mut();
    }
    // The new page starts right past the previous end of memory.
    ((ret as u32) * page_size()) as *mut u8
}
#[no_mangle]
pub unsafe extern "C" fn page_free(page: *mut u8) {
    // Push the page onto the free list: store the old head in the page's
    // first word and make this page the new head.
    let page = page as *mut *mut u8;
*page = HEAD as *mut u8;
HEAD = page;
}
#[no_mangle]
pub unsafe extern "C" fn memory_used() -> usize {
(page_size() * (memory::size(0) as u32)) as usize
}
fn page_size() -> u32 {
64 * 1024
}
| 19.291667 | 55 | 0.576674 |
89e23ff9d1aaa39ae8d539e259ce6bacbcb90374 | 1,446 | use crate::matrix::ge::Matrix;
use crate::matrix::MatrixError;
use lapack::dgesvd;
impl Matrix {
/// # Singular Value Decomposition
///
/// https://en.wikipedia.org/wiki/Singular_value_decomposition
///
/// `M = U * Sigma * V^T`
/// `(u, sigma, vt)`
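    ///
    /// A minimal usage sketch (`m` is assumed to be a square `Matrix` built
    /// elsewhere; the singular values land in the leading elements of
    /// `sigma`):
    ///
    /// ```ignore
    /// let (u, sigma, vt) = m.gesvd()?;
    /// ```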
pub fn gesvd(mut self) -> Result<(Matrix, Matrix, Matrix), MatrixError> {
if self.rows != self.cols {
return Err(MatrixError::DimensionMismatch);
}
let mut info = 0;
let mut u = Matrix::new(self.rows, self.rows);
        // dgesvd writes the min(m, n) singular values, in descending order,
        // into the leading elements of this buffer; the rest stays zero.
        let mut sigma = Matrix::new(self.rows, self.cols);
let mut vt = Matrix::new(self.cols, self.cols);
        // LAPACK requires lwork >= max(1, 5 * min(m, n)) for a square input.
        let lwork = 1usize.max(5usize * self.rows.min(self.cols));
unsafe {
dgesvd(
                b'A', // jobu: compute all columns of U in `u`
                b'A', // jobvt: compute all rows of V^T in `vt`
self.rows as i32,
self.cols as i32,
&mut self.elems,
self.rows as i32,
&mut sigma.elems,
&mut u.elems,
self.rows as i32,
&mut vt.elems,
self.cols as i32,
&mut vec![0.0; lwork],
lwork as i32,
&mut info,
);
}
match info {
0 => Ok((u, sigma, vt)),
_ => Err(MatrixError::LapackRoutineError {
routine: "dgesvd".to_owned(),
info,
}),
}
}
}
| 28.352941 | 77 | 0.460581 |
fcc6d41244693102daa13f7540911358dd75462e | 4,610 | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2020 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to connect `PackFileContentsUI` signals with their corresponding slots.
This module is, and should stay, private, as it's only glue between the `PackFileContentsUI` and `PackFileContentsSlots` structs.
!*/
use super::{PackFileContentsUI, slots::PackFileContentsSlots};
/// This function connects all the actions from the provided `PackFileContentsUI` with their slots in `PackFileContentsSlots`.
///
/// This function is just glue to trigger after initializing both, the actions and the slots. It's here
/// to not pollute the other modules with a ton of connections.
pub unsafe fn set_connections(ui: &PackFileContentsUI, slots: &PackFileContentsSlots) {
//ui.packfile_contents_tree_view.clicked().connect(&slots.open_packedfile_preview);
ui.packfile_contents_tree_view.selection_model().selection_changed().connect(&slots.open_packedfile_preview);
//ui.packfile_contents_tree_view.activated().connect(&slots.open_packedfile_full);
ui.packfile_contents_tree_view.double_clicked().connect(&slots.open_packedfile_full);
// Trigger the filter whenever the "filtered" text or any of his settings changes.
ui.filter_line_edit.text_changed().connect(&slots.filter_change_text);
ui.filter_autoexpand_matches_button.toggled().connect(&slots.filter_change_autoexpand_matches);
ui.filter_case_sensitive_button.toggled().connect(&slots.filter_change_case_sensitive);
ui.packfile_contents_tree_model.item_changed().connect(&slots.update_packfile_state);
ui.packfile_contents_tree_view.custom_context_menu_requested().connect(&slots.contextual_menu);
ui.packfile_contents_tree_view.selection_model().selection_changed().connect(&slots.contextual_menu_enabler);
ui.packfile_contents_tree_view_context_menu.about_to_show().connect(&slots.contextual_menu_enabler);
ui.context_menu_add_file.triggered().connect(&slots.contextual_menu_add_file);
ui.context_menu_add_folder.triggered().connect(&slots.contextual_menu_add_folder);
ui.context_menu_add_from_packfile.triggered().connect(&slots.contextual_menu_add_from_packfile);
ui.context_menu_delete.triggered().connect(&slots.contextual_menu_delete);
ui.context_menu_extract.triggered().connect(&slots.contextual_menu_extract);
ui.context_menu_rename.triggered().connect(&slots.contextual_menu_rename);
ui.context_menu_new_folder.triggered().connect(&slots.contextual_menu_new_folder);
ui.context_menu_new_packed_file_db.triggered().connect(&slots.contextual_menu_new_packed_file_db);
ui.context_menu_new_packed_file_loc.triggered().connect(&slots.contextual_menu_new_packed_file_loc);
ui.context_menu_new_packed_file_text.triggered().connect(&slots.contextual_menu_new_packed_file_text);
ui.context_menu_new_queek_packed_file.triggered().connect(&slots.contextual_menu_new_queek_packed_file);
ui.context_menu_open_decoder.triggered().connect(&slots.contextual_menu_open_decoder);
ui.context_menu_open_dependency_manager.triggered().connect(&slots.contextual_menu_open_dependency_manager);
ui.context_menu_open_containing_folder.triggered().connect(&slots.contextual_menu_open_containing_folder);
ui.context_menu_open_with_external_program.triggered().connect(&slots.contextual_menu_open_in_external_program);
ui.context_menu_open_notes.triggered().connect(&slots.contextual_menu_open_notes);
ui.context_menu_check_tables.triggered().connect(&slots.contextual_menu_tables_check_integrity);
ui.context_menu_merge_tables.triggered().connect(&slots.contextual_menu_tables_merge_tables);
ui.context_menu_update_table.triggered().connect(&slots.contextual_menu_tables_update_table);
ui.context_menu_mass_import_tsv.triggered().connect(&slots.contextual_menu_mass_import_tsv);
ui.context_menu_mass_export_tsv.triggered().connect(&slots.contextual_menu_mass_export_tsv);
ui.packfile_contents_tree_view_expand_all.triggered().connect(&slots.packfile_contents_tree_view_expand_all);
ui.packfile_contents_tree_view_collapse_all.triggered().connect(&slots.packfile_contents_tree_view_collapse_all);
}
| 66.811594 | 129 | 0.793492 |
6ad22820a7d9073c13c54c2967ea60b4859194b2 | 19,014 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Standard library macros
//!
//! This modules contains a set of macros which are exported from the standard
//! library. Each macro is available for use when linking against the standard
//! library.
/// The entry point for panic of Rust threads.
///
/// This macro is used to inject panic into a Rust thread, causing the thread to
/// panic entirely. Each thread's panic can be reaped as the `Box<Any>` type,
/// and the single-argument form of the `panic!` macro will be the value which
/// is transmitted.
///
/// The multi-argument form of this macro panics with a string and has the
/// `format!` syntax for building a string.
///
/// # Current implementation
///
/// If the main thread panics it will terminate all your threads and end your
/// program with code `101`.
///
/// # Examples
///
/// ```should_panic
/// # #![allow(unreachable_code)]
/// panic!();
/// panic!("this is a terrible mistake!");
/// panic!(4); // panic with the value of 4 to be collected elsewhere
/// panic!("this is a {} {message}", "fancy", message = "message");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! panic {
() => ({
panic!("explicit panic")
});
($msg:expr) => ({
$crate::rt::begin_panic_new($msg, {
// static requires less code at runtime, more constant data
static _FILE_LINE_COL: (&'static str, u32, u32) = (file!(), line!(), column!());
&_FILE_LINE_COL
})
});
($fmt:expr, $($arg:tt)+) => ({
$crate::rt::begin_panic_fmt(&format_args!($fmt, $($arg)+), {
// The leading _'s are to avoid dead code warnings if this is
// used inside a dead function. Just `#[allow(dead_code)]` is
// insufficient, since the user may have
            // `#[forbid(dead_code)]`, which cannot be overridden.
static _FILE_LINE_COL: (&'static str, u32, u32) = (file!(), line!(), column!());
&_FILE_LINE_COL
})
});
}
/// Macro for printing to the standard output.
///
/// Equivalent to the `println!` macro except that a newline is not printed at
/// the end of the message.
///
/// Note that stdout is frequently line-buffered by default so it may be
/// necessary to use `io::stdout().flush()` to ensure the output is emitted
/// immediately.
///
/// Use `print!` only for the primary output of your program. Use
/// `eprint!` instead to print error and progress messages.
///
/// # Panics
///
/// Panics if writing to `io::stdout()` fails.
///
/// # Examples
///
/// ```
/// use std::io::{self, Write};
///
/// print!("this ");
/// print!("will ");
/// print!("be ");
/// print!("on ");
/// print!("the ");
/// print!("same ");
/// print!("line ");
///
/// io::stdout().flush().unwrap();
///
/// print!("this string has a newline, why not choose println! instead?\n");
///
/// io::stdout().flush().unwrap();
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! print {
($($arg:tt)*) => ($crate::io::_print(format_args!($($arg)*)));
}
/// Macro for printing to the standard output, with a newline. On all
/// platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
///
/// Use the `format!` syntax to write data to the standard output.
/// See `std::fmt` for more information.
///
/// Use `println!` only for the primary output of your program. Use
/// `eprintln!` instead to print error and progress messages.
///
/// # Panics
///
/// Panics if writing to `io::stdout` fails.
///
/// # Examples
///
/// ```
/// println!(); // prints just a newline
/// println!("hello there!");
/// println!("format {} arguments", "some");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Macro for printing to the standard error.
///
/// Equivalent to the `print!` macro, except that output goes to
/// `io::stderr` instead of `io::stdout`. See `print!` for
/// example usage.
///
/// Use `eprint!` only for error and progress messages. Use `print!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `io::stderr` fails.
#[macro_export]
#[stable(feature = "eprint", since = "1.19.0")]
#[allow_internal_unstable]
macro_rules! eprint {
($($arg:tt)*) => ($crate::io::_eprint(format_args!($($arg)*)));
}
/// Macro for printing to the standard error, with a newline.
///
/// Equivalent to the `println!` macro, except that output goes to
/// `io::stderr` instead of `io::stdout`. See `println!` for
/// example usage.
///
/// Use `eprintln!` only for error and progress messages. Use `println!`
/// instead for the primary output of your program.
///
/// # Panics
///
/// Panics if writing to `io::stderr` fails.
#[macro_export]
#[stable(feature = "eprint", since = "1.19.0")]
macro_rules! eprintln {
() => (eprint!("\n"));
($fmt:expr) => (eprint!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (eprint!(concat!($fmt, "\n"), $($arg)*));
}
/// A macro to select an event from a number of receivers.
///
/// This macro is used to wait for the first event to occur on a number of
/// receivers. It places no restrictions on the types of receivers given to
/// this macro; this can be viewed as a heterogeneous select.
///
/// # Examples
///
/// ```
/// #![feature(mpsc_select)]
///
/// use std::thread;
/// use std::sync::mpsc;
///
/// // two placeholder functions for now
/// fn long_running_thread() {}
/// fn calculate_the_answer() -> u32 { 42 }
///
/// let (tx1, rx1) = mpsc::channel();
/// let (tx2, rx2) = mpsc::channel();
///
/// thread::spawn(move|| { long_running_thread(); tx1.send(()).unwrap(); });
/// thread::spawn(move|| { tx2.send(calculate_the_answer()).unwrap(); });
///
/// select! {
/// _ = rx1.recv() => println!("the long running thread finished first"),
/// answer = rx2.recv() => {
/// println!("the answer was: {}", answer.unwrap());
/// }
/// }
/// # drop(rx1.recv());
/// # drop(rx2.recv());
/// ```
///
/// For more information about select, see the `std::sync::mpsc::Select` structure.
#[macro_export]
#[unstable(feature = "mpsc_select", issue = "27800")]
macro_rules! select {
(
$($name:pat = $rx:ident.$meth:ident() => $code:expr),+
) => ({
use $crate::sync::mpsc::Select;
let sel = Select::new();
$( let mut $rx = sel.handle(&$rx); )+
unsafe {
$( $rx.add(); )+
}
let ret = sel.wait();
$( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
{ unreachable!() }
})
}
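// Test-only helper: asserts that two floating point expressions differ by
// less than 1.0e-6.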
#[cfg(test)]
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => ({
let (a, b) = (&$a, &$b);
assert!((*a - *b).abs() < 1.0e-6,
"{} is not approximately equal to {}", *a, *b);
})
}
/// Built-in macros to the compiler itself.
///
/// These macros do not have any corresponding definition with a `macro_rules!`
/// macro, but are documented here. Their implementations can be found hardcoded
/// into libsyntax itself.
#[cfg(dox)]
pub mod builtin {
/// Unconditionally causes compilation to fail with the given error message when encountered.
///
/// For more information, see the [RFC].
///
/// [RFC]: https://github.com/rust-lang/rfcs/blob/master/text/1695-add-error-macro.md
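    ///
    /// # Examples
    ///
    /// A sketch of rejecting an unsupported configuration at compile time:
    ///
    /// ```compile_fail
    /// compile_error!("this crate does not support this target");
    /// ```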
#[unstable(feature = "compile_error_macro", issue = "40872")]
#[macro_export]
macro_rules! compile_error { ($msg:expr) => ({ /* compiler built-in */ }) }
/// The core macro for formatted string creation & output.
///
/// This macro produces a value of type [`fmt::Arguments`]. This value can be
/// passed to the functions in [`std::fmt`] for performing useful functions.
/// All other formatting macros ([`format!`], [`write!`], [`println!`], etc) are
/// proxied through this one.
///
/// For more information, see the documentation in [`std::fmt`].
///
/// [`fmt::Arguments`]: ../std/fmt/struct.Arguments.html
/// [`std::fmt`]: ../std/fmt/index.html
/// [`format!`]: ../std/macro.format.html
/// [`write!`]: ../std/macro.write.html
/// [`println!`]: ../std/macro.println.html
///
/// # Examples
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("hello {}", "world"));
/// assert_eq!(s, format!("hello {}", "world"));
///
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! format_args { ($fmt:expr, $($args:tt)*) => ({
/* compiler built-in */
}) }
/// Inspect an environment variable at compile time.
///
/// This macro will expand to the value of the named environment variable at
/// compile time, yielding an expression of type `&'static str`.
///
/// If the environment variable is not defined, then a compilation error
/// will be emitted. To not emit a compile error, use the `option_env!`
/// macro instead.
///
/// # Examples
///
/// ```
/// let path: &'static str = env!("PATH");
/// println!("the $PATH variable at the time of compiling was: {}", path);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! env { ($name:expr) => ({ /* compiler built-in */ }) }
/// Optionally inspect an environment variable at compile time.
///
/// If the named environment variable is present at compile time, this will
/// expand into an expression of type `Option<&'static str>` whose value is
/// `Some` of the value of the environment variable. If the environment
/// variable is not present, then this will expand to `None`.
///
/// A compile time error is never emitted when using this macro regardless
/// of whether the environment variable is present or not.
///
/// # Examples
///
/// ```
/// let key: Option<&'static str> = option_env!("SECRET_KEY");
/// println!("the secret key might be: {:?}", key);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! option_env { ($name:expr) => ({ /* compiler built-in */ }) }
/// Concatenate identifiers into one identifier.
///
/// This macro takes any number of comma-separated identifiers, and
/// concatenates them all into one, yielding an expression which is a new
/// identifier. Note that hygiene makes it such that this macro cannot
/// capture local variables. Also, as a general rule, macros are only
/// allowed in item, statement or expression position. That means while
/// you may use this macro for referring to existing variables, functions or
/// modules etc, you cannot define a new one with it.
///
/// # Examples
///
/// ```
/// #![feature(concat_idents)]
///
/// # fn main() {
/// fn foobar() -> u32 { 23 }
///
/// let f = concat_idents!(foo, bar);
/// println!("{}", f());
///
/// // fn concat_idents!(new, fun, name) { } // not usable in this way!
/// # }
/// ```
#[unstable(feature = "concat_idents_macro", issue = "29599")]
#[macro_export]
macro_rules! concat_idents {
($($e:ident),*) => ({ /* compiler built-in */ })
}
/// Concatenates literals into a static string slice.
///
/// This macro takes any number of comma-separated literals, yielding an
/// expression of type `&'static str` which represents all of the literals
/// concatenated left-to-right.
///
/// Integer and floating point literals are stringified in order to be
/// concatenated.
///
/// # Examples
///
/// ```
/// let s = concat!("test", 10, 'b', true);
/// assert_eq!(s, "test10btrue");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! concat { ($($e:expr),*) => ({ /* compiler built-in */ }) }
/// A macro which expands to the line number on which it was invoked.
///
/// The expanded expression has type `u32`, and the returned line is not
/// the invocation of the `line!()` macro itself, but rather the first macro
/// invocation leading up to the invocation of the `line!()` macro.
///
/// # Examples
///
/// ```
/// let current_line = line!();
/// println!("defined on line: {}", current_line);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! line { () => ({ /* compiler built-in */ }) }
/// A macro which expands to the column number on which it was invoked.
///
/// The expanded expression has type `u32`, and the returned column is not
/// the invocation of the `column!()` macro itself, but rather the first macro
/// invocation leading up to the invocation of the `column!()` macro.
///
/// # Examples
///
/// ```
/// let current_col = column!();
/// println!("defined on column: {}", current_col);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! column { () => ({ /* compiler built-in */ }) }
/// A macro which expands to the file name from which it was invoked.
///
/// The expanded expression has type `&'static str`, and the returned file
/// is not the invocation of the `file!()` macro itself, but rather the
/// first macro invocation leading up to the invocation of the `file!()`
/// macro.
///
/// # Examples
///
/// ```
/// let this_file = file!();
/// println!("defined in file: {}", this_file);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! file { () => ({ /* compiler built-in */ }) }
/// A macro which stringifies its argument.
///
/// This macro will yield an expression of type `&'static str` which is the
/// stringification of all the tokens passed to the macro. No restrictions
/// are placed on the syntax of the macro invocation itself.
///
/// Note that the expanded results of the input tokens may change in the
/// future. You should be careful if you rely on the output.
///
/// # Examples
///
/// ```
/// let one_plus_one = stringify!(1 + 1);
/// assert_eq!(one_plus_one, "1 + 1");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! stringify { ($t:tt) => ({ /* compiler built-in */ }) }
/// Includes a utf8-encoded file as a string.
///
    /// The file is located relative to the current file (similarly to how
    /// modules are found).
///
/// This macro will yield an expression of type `&'static str` which is the
/// contents of the file.
///
/// # Examples
///
/// ```ignore (cannot-doctest-external-file-dependency)
/// let secret_key = include_str!("secret-key.ascii");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! include_str { ($file:expr) => ({ /* compiler built-in */ }) }
/// Includes a file as a reference to a byte array.
///
    /// The file is located relative to the current file (similarly to how
    /// modules are found).
///
/// This macro will yield an expression of type `&'static [u8; N]` which is
/// the contents of the file.
///
/// # Examples
///
/// ```ignore (cannot-doctest-external-file-dependency)
/// let secret_key = include_bytes!("secret-key.bin");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! include_bytes { ($file:expr) => ({ /* compiler built-in */ }) }
/// Expands to a string that represents the current module path.
///
/// The current module path can be thought of as the hierarchy of modules
/// leading back up to the crate root. The first component of the path
/// returned is the name of the crate currently being compiled.
///
/// # Examples
///
/// ```
/// mod test {
/// pub fn foo() {
/// assert!(module_path!().ends_with("test"));
/// }
/// }
///
/// test::foo();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! module_path { () => ({ /* compiler built-in */ }) }
/// Boolean evaluation of configuration flags.
///
/// In addition to the `#[cfg]` attribute, this macro is provided to allow
/// boolean expression evaluation of configuration flags. This frequently
/// leads to less duplicated code.
///
/// The syntax given to this macro is the same syntax as [the `cfg`
/// attribute](../book/first-edition/conditional-compilation.html).
///
/// # Examples
///
/// ```
/// let my_directory = if cfg!(windows) {
/// "windows-specific-directory"
/// } else {
/// "unix-directory"
/// };
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! cfg { ($($cfg:tt)*) => ({ /* compiler built-in */ }) }
/// Parse a file as an expression or an item according to the context.
///
/// The file is located relative to the current file (similarly to how
/// modules are found).
///
/// Using this macro is often a bad idea, because if the file is
/// parsed as an expression, it is going to be placed in the
/// surrounding code unhygienically. This could result in variables
/// or functions being different from what the file expected if
/// there are variables or functions that have the same name in
/// the current file.
///
/// # Examples
///
/// Assume there are two files in the same directory with the following
/// contents:
///
/// File 'my_str.in':
///
/// ```ignore (only-for-syntax-highlight)
/// "Hello World!"
/// ```
///
/// File 'main.rs':
///
/// ```ignore (cannot-doctest-external-file-dependency)
/// fn main() {
/// let my_str = include!("my_str.in");
/// println!("{}", my_str);
/// }
/// ```
///
/// Compiling 'main.rs' and running the resulting binary will print "Hello
/// World!".
#[stable(feature = "rust1", since = "1.0.0")]
#[macro_export]
macro_rules! include { ($file:expr) => ({ /* compiler built-in */ }) }
}
| 34.136445 | 97 | 0.58336 |
cc05e6f81bdb7e53fafa23e8d269621bb6732ca5 | 6,548 | use std::borrow::Cow;
use crate::symbol::Symbol;
use crate::types::{AliasRef, Type, TypeContext, TypeEnv, TypeExt};
quick_error! {
#[derive(Debug, PartialEq)]
pub enum Error {
UndefinedType(id: Symbol) {
description("undefined type")
display("Type `{}` does not exist.", id)
}
SelfRecursiveAlias(id: Symbol) {
description("undefined type")
display("Tried to remove self recursive alias `{}`.", id)
}
}
}
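/// Resolves aliases while remembering which alias names have already been
/// expanded, so that a self-recursive alias is reported as an error instead
/// of looping forever.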
#[derive(Debug, Default)]
pub struct AliasRemover {
reduced_aliases: Vec<Symbol>,
}
impl AliasRemover {
pub fn new() -> AliasRemover {
Self::default()
}
pub fn len(&self) -> usize {
self.reduced_aliases.len()
}
pub fn is_empty(&self) -> bool {
self.reduced_aliases.is_empty()
}
pub fn reset(&mut self, to: usize) {
self.reduced_aliases.truncate(to)
}
pub fn canonical_alias<'t, F, T>(
&mut self,
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
typ: &'t T,
mut canonical: F,
) -> Result<Cow<'t, T>, Error>
where
F: FnMut(&AliasRef<Symbol, T>) -> bool,
T: TypeExt<Id = Symbol> + Clone + ::std::fmt::Display,
{
Ok(match peek_alias(env, typ) {
Ok(Some(alias)) => {
if self.reduced_aliases.contains(&alias.name) {
return Err(Error::SelfRecursiveAlias(alias.name.clone()));
}
self.reduced_aliases.push(alias.name.clone());
if canonical(alias) {
Cow::Borrowed(typ)
} else {
match alias.typ(interner).apply_args(
alias.params(),
&typ.unapplied_args(),
interner,
) {
Some(typ) => Cow::Owned(
self.canonical_alias(env, interner, &typ, canonical)?
.into_owned(),
),
None => Cow::Borrowed(typ),
}
}
}
_ => Cow::Borrowed(typ),
})
}
pub fn remove_aliases<T>(
&mut self,
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
mut typ: T,
) -> Result<T, Error>
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
loop {
typ = match self.remove_alias(env, interner, &typ)? {
Some(typ) => typ,
None => return Ok(typ),
};
}
}
pub fn remove_alias<T>(
&mut self,
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
typ: &T,
) -> Result<Option<T>, Error>
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
match peek_alias(env, &typ)? {
Some(alias) => {
if self.reduced_aliases.iter().any(|name| *name == alias.name) {
return Err(Error::SelfRecursiveAlias(alias.name.clone()));
}
self.reduced_aliases.push(alias.name.clone());
// Opaque types should only exist as the alias itself
if let Type::Opaque = **alias.unresolved_type() {
return Ok(None);
}
Ok(alias
.typ(interner)
.apply_args(alias.params(), &typ.unapplied_args(), interner))
}
None => Ok(None),
}
}
}
/// Removes type aliases from `typ` until it is an actual type
pub fn remove_aliases<T>(
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
mut typ: T,
) -> T
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
while let Ok(Some(new)) = remove_alias(env, interner, &typ) {
typ = new;
}
typ
}
pub fn remove_aliases_cow<'t, T>(
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
typ: &'t T,
) -> Cow<'t, T>
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
match remove_alias(env, interner, typ) {
Ok(Some(typ)) => Cow::Owned(remove_aliases(env, interner, typ)),
_ => Cow::Borrowed(typ),
}
}
/// Resolves aliases until `canonical` returns `true` for an alias in which case it returns the
/// type that directly contains that alias
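///
/// A usage sketch (`env`, `interner` and `typ` are assumed from the caller's
/// context; the alias name is illustrative):
///
/// ```ignore
/// let canonical = canonical_alias(env, interner, &typ, |alias| {
///     alias.name.declared_name() == "MyAlias"
/// });
/// ```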
pub fn canonical_alias<'t, F, T>(
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
typ: &'t T,
mut canonical: F,
) -> Cow<'t, T>
where
F: FnMut(&AliasRef<Symbol, T>) -> bool,
T: TypeExt<Id = Symbol> + Clone + ::std::fmt::Display,
{
match peek_alias(env, typ) {
Ok(Some(alias)) => {
if canonical(alias) {
Cow::Borrowed(typ)
} else {
alias
.typ(interner)
.apply_args(alias.params(), &typ.unapplied_args(), interner)
.map(|typ| {
Cow::Owned(canonical_alias(env, interner, &typ, canonical).into_owned())
})
.unwrap_or_else(|| Cow::Borrowed(typ))
}
}
_ => Cow::Borrowed(typ),
}
}
/// Expand `typ` if it is an alias that can be expanded and return the expanded type.
/// Returns `None` if the type is not an alias or the alias could not be expanded.
pub fn remove_alias<T>(
env: &TypeEnv<Type = T>,
interner: &mut impl TypeContext<Symbol, T>,
typ: &T,
) -> Result<Option<T>, Error>
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
Ok(peek_alias(env, &typ)?.and_then(|alias| {
// Opaque types should only exist as the alias itself
if let Type::Opaque = **alias.unresolved_type() {
return None;
}
alias
.typ(interner)
.apply_args(alias.params(), &typ.unapplied_args(), interner)
}))
}
pub fn peek_alias<'t, T>(
env: &'t TypeEnv<Type = T>,
typ: &'t T,
) -> Result<Option<&'t AliasRef<Symbol, T>>, Error>
where
T: TypeExt<Id = Symbol> + ::std::fmt::Display,
{
let maybe_alias = typ.applied_alias();
match typ.alias_ident() {
Some(id) => {
let alias = match maybe_alias {
Some(alias) => Some(alias),
None => env.find_type_info(id).map(|a| &**a),
};
Ok(alias)
}
None => Ok(None),
}
}
| 28.973451 | 96 | 0.503971 |
3aae3a05cef964020e71cd1d0731a7dc0e0710b7 | 67,558 | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! X11 window creation and window management.
use std::cell::{Cell, RefCell};
use std::collections::BinaryHeap;
use std::convert::{TryFrom, TryInto};
use std::os::unix::io::RawFd;
use std::panic::Location;
use std::rc::{Rc, Weak};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use crate::scale::Scalable;
use anyhow::{anyhow, Context, Error};
use cairo::{XCBConnection as CairoXCBConnection, XCBDrawable, XCBSurface, XCBVisualType};
use tracing::{error, info, warn};
use x11rb::atom_manager;
use x11rb::connection::Connection;
use x11rb::errors::ReplyOrIdError;
use x11rb::properties::{WmHints, WmHintsState, WmSizeHints};
use x11rb::protocol::present::{CompleteNotifyEvent, ConnectionExt as _, IdleNotifyEvent};
use x11rb::protocol::render::{ConnectionExt as _, Pictformat};
use x11rb::protocol::xfixes::{ConnectionExt as _, Region as XRegion};
use x11rb::protocol::xproto::{
self, AtomEnum, ChangeWindowAttributesAux, ColormapAlloc, ConfigureNotifyEvent,
ConfigureWindowAux, ConnectionExt, CreateGCAux, EventMask, Gcontext, ImageFormat,
ImageOrder as X11ImageOrder, Pixmap, PropMode, Rectangle, Visualtype, WindowClass,
};
use x11rb::wrapper::ConnectionExt as _;
use x11rb::xcb_ffi::XCBConnection;
#[cfg(feature = "raw-win-handle")]
use raw_window_handle::{unix::XcbHandle, HasRawWindowHandle, RawWindowHandle};
use crate::common_util::IdleCallback;
use crate::dialog::FileDialogOptions;
use crate::error::Error as ShellError;
use crate::keyboard::{KeyEvent, KeyState, Modifiers};
use crate::kurbo::{Insets, Point, Rect, Size, Vec2};
use crate::mouse::{Cursor, CursorDesc, MouseButton, MouseButtons, MouseEvent};
use crate::piet::{Piet, PietText, RenderContext};
use crate::region::Region;
use crate::scale::Scale;
use crate::text::{simulate_input, Event};
use crate::window::{
FileDialogToken, IdleToken, TextFieldToken, TimerToken, WinHandler, WindowLevel,
};
use crate::{window, ScaledArea};
use super::application::Application;
use super::keycodes;
use super::menu::Menu;
use super::util::Timer;
/// A version of XCB's `xcb_visualtype_t` struct. This was copied from the [example] in x11rb; it
/// is used to interoperate with cairo.
///
/// The official upstream reference for this struct definition is [here].
///
/// [example]: https://github.com/psychon/x11rb/blob/master/cairo-example/src/main.rs
/// [here]: https://xcb.freedesktop.org/manual/structxcb__visualtype__t.html
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct xcb_visualtype_t {
pub visual_id: u32,
pub class: u8,
pub bits_per_rgb_value: u8,
pub colormap_entries: u16,
pub red_mask: u32,
pub green_mask: u32,
pub blue_mask: u32,
pub pad0: [u8; 4],
}
impl From<Visualtype> for xcb_visualtype_t {
fn from(value: Visualtype) -> xcb_visualtype_t {
xcb_visualtype_t {
visual_id: value.visual_id,
class: value.class.into(),
bits_per_rgb_value: value.bits_per_rgb_value,
colormap_entries: value.colormap_entries,
red_mask: value.red_mask,
green_mask: value.green_mask,
blue_mask: value.blue_mask,
pad0: [0; 4],
}
}
}
fn size_hints(resizable: bool, size: Size, min_size: Size) -> WmSizeHints {
let mut size_hints = WmSizeHints::new();
if resizable {
size_hints.min_size = Some((min_size.width as i32, min_size.height as i32));
} else {
size_hints.min_size = Some((size.width as i32, size.height as i32));
size_hints.max_size = Some((size.width as i32, size.height as i32));
}
size_hints
}
pub(crate) struct WindowBuilder {
app: Application,
handler: Option<Box<dyn WinHandler>>,
title: String,
transparent: bool,
position: Option<Point>,
size: Size,
min_size: Size,
resizable: bool,
level: WindowLevel,
state: Option<window::WindowState>,
}
impl WindowBuilder {
pub fn new(app: Application) -> WindowBuilder {
WindowBuilder {
app,
handler: None,
title: String::new(),
transparent: false,
position: None,
size: Size::new(500.0, 400.0),
min_size: Size::new(0.0, 0.0),
resizable: true,
level: WindowLevel::AppWindow,
state: None,
}
}
pub fn set_handler(&mut self, handler: Box<dyn WinHandler>) {
self.handler = Some(handler);
}
pub fn set_size(&mut self, size: Size) {
        // a zero-sized window results in a server error
self.size = if size.width == 0. || size.height == 0. {
Size::new(1., 1.)
} else {
size
};
}
pub fn set_min_size(&mut self, min_size: Size) {
self.min_size = min_size;
}
pub fn resizable(&mut self, resizable: bool) {
self.resizable = resizable;
}
pub fn show_titlebar(&mut self, _show_titlebar: bool) {
// not sure how to do this, maybe _MOTIF_WM_HINTS?
warn!("WindowBuilder::show_titlebar is currently unimplemented for X11 backend.");
}
pub fn set_transparent(&mut self, transparent: bool) {
self.transparent = transparent;
}
pub fn set_position(&mut self, position: Point) {
self.position = Some(position);
}
pub fn set_level(&mut self, level: window::WindowLevel) {
self.level = level;
}
pub fn set_window_state(&mut self, state: window::WindowState) {
self.state = Some(state);
}
pub fn set_title<S: Into<String>>(&mut self, title: S) {
self.title = title.into();
}
pub fn set_menu(&mut self, _menu: Menu) {
// TODO(x11/menus): implement WindowBuilder::set_menu (currently a no-op)
}
/// Registers and returns all the atoms that the window will need.
fn atoms(&self, window_id: u32) -> Result<WindowAtoms, Error> {
let conn = self.app.connection();
let atoms = WindowAtoms::new(conn.as_ref())?
.reply()
.context("get X11 atoms")?;
// Replace the window's WM_PROTOCOLS with the following.
let protocols = [atoms.WM_DELETE_WINDOW];
conn.change_property32(
PropMode::REPLACE,
window_id,
atoms.WM_PROTOCOLS,
AtomEnum::ATOM,
&protocols,
)?
.check()
.context("set WM_PROTOCOLS")?;
Ok(atoms)
}
fn create_cairo_surface(
&self,
window_id: u32,
visual_type: &Visualtype,
) -> Result<XCBSurface, Error> {
let conn = self.app.connection();
let cairo_xcb_connection = unsafe {
CairoXCBConnection::from_raw_none(
conn.get_raw_xcb_connection() as *mut cairo_sys::xcb_connection_t
)
};
let cairo_drawable = XCBDrawable(window_id);
let mut xcb_visual = xcb_visualtype_t::from(*visual_type);
let cairo_visual_type = unsafe {
XCBVisualType::from_raw_none(
&mut xcb_visual as *mut xcb_visualtype_t as *mut cairo_sys::xcb_visualtype_t,
)
};
let cairo_surface = XCBSurface::create(
&cairo_xcb_connection,
&cairo_drawable,
&cairo_visual_type,
self.size.width as i32,
self.size.height as i32,
)
.map_err(|status| anyhow!("Failed to create cairo surface: {}", status))?;
Ok(cairo_surface)
}
// TODO(x11/menus): make menus if requested
pub fn build(self) -> Result<WindowHandle, Error> {
let conn = self.app.connection();
let screen_num = self.app.screen_num();
let id = conn.generate_id()?;
let setup = conn.setup();
let env_dpi = std::env::var("DRUID_X11_DPI")
.ok()
.map(|x| x.parse::<f64>());
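        // 96 dpi is treated as a scale factor of 1.0, so e.g. `Xft.dpi: 144` in the
        // X resource database yields a 1.5x scale; DRUID_X11_DPI takes precedence
        // over the resource database value.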
let scale = match env_dpi.or_else(|| self.app.rdb.get_value("Xft.dpi", "").transpose()) {
Some(Ok(dpi)) => {
let scale = dpi / 96.;
Scale::new(scale, scale)
}
None => Scale::default(),
Some(Err(err)) => {
let default = Scale::default();
warn!(
"Unable to parse dpi: {:?}, defaulting to {:?}",
err, default
);
default
}
};
let size_px = self.size.to_px(scale);
let screen = setup
.roots
.get(screen_num)
.ok_or_else(|| anyhow!("Invalid screen num: {}", screen_num))?;
let visual_type = if self.transparent {
self.app.argb_visual_type()
} else {
None
};
let (transparent, visual_type) = match visual_type {
Some(visual) => (true, visual),
None => (false, self.app.root_visual_type()),
};
if transparent != self.transparent {
warn!("Windows with transparent backgrounds do not work");
}
let mut cw_values = xproto::CreateWindowAux::new().event_mask(
EventMask::EXPOSURE
| EventMask::STRUCTURE_NOTIFY
| EventMask::KEY_PRESS
| EventMask::KEY_RELEASE
| EventMask::BUTTON_PRESS
| EventMask::BUTTON_RELEASE
| EventMask::POINTER_MOTION,
);
if transparent {
let colormap = conn.generate_id()?;
conn.create_colormap(
ColormapAlloc::NONE,
colormap,
screen.root,
visual_type.visual_id,
)?;
cw_values = cw_values
.border_pixel(screen.white_pixel)
.colormap(colormap);
};
let pos = self.position.unwrap_or_default().to_px(scale);
// Create the actual window
let (width_px, height_px) = (size_px.width as u16, size_px.height as u16);
let depth = if transparent { 32 } else { screen.root_depth };
conn.create_window(
// Window depth
depth,
// The new window's ID
id,
// Parent window of this new window
// TODO(#468): either `screen.root()` (no parent window) or pass parent here to attach
screen.root,
// X-coordinate of the new window
pos.x as _,
// Y-coordinate of the new window
pos.y as _,
// Width of the new window
width_px,
// Height of the new window
height_px,
// Border width
0,
// Window class type
WindowClass::INPUT_OUTPUT,
// Visual ID
visual_type.visual_id,
// Window properties mask
&cw_values,
)?
.check()
.context("create window")?;
if let Some(colormap) = cw_values.colormap {
conn.free_colormap(colormap)?;
}
// Allocate a graphics context (currently used only for copying pixels when present is
// unavailable).
let gc = conn.generate_id()?;
conn.create_gc(gc, id, &CreateGCAux::new())?
.check()
.context("create graphics context")?;
// TODO(x11/errors): Should do proper cleanup (window destruction etc) in case of error
let atoms = self.atoms(id)?;
let cairo_surface = RefCell::new(self.create_cairo_surface(id, &visual_type)?);
let present_data = match self.initialize_present_data(id) {
Ok(p) => Some(p),
Err(e) => {
info!("Failed to initialize present extension: {}", e);
None
}
};
let handler = RefCell::new(self.handler.unwrap());
// When using present, we generally need two buffers (because after we present, we aren't
// allowed to use that buffer for a little while, and so we might want to render to the
// other one). Otherwise, we only need one.
let buf_count = if present_data.is_some() { 2 } else { 1 };
let buffers = RefCell::new(Buffers::new(
conn, id, buf_count, width_px, height_px, depth,
)?);
// Initialize some properties
let pid = nix::unistd::Pid::this().as_raw();
if let Ok(pid) = u32::try_from(pid) {
conn.change_property32(
xproto::PropMode::REPLACE,
id,
atoms._NET_WM_PID,
AtomEnum::CARDINAL,
&[pid],
)?
.check()
.context("set _NET_WM_PID")?;
}
let min_size = self.min_size.to_px(scale);
log_x11!(size_hints(self.resizable, size_px, min_size)
.set_normal_hints(conn.as_ref(), id)
.context("set wm normal hints"));
// TODO: set _NET_WM_STATE
let mut hints = WmHints::new();
if let Some(state) = self.state {
hints.initial_state = Some(match state {
window::WindowState::Maximized => WmHintsState::Normal,
window::WindowState::Minimized => WmHintsState::Iconic,
window::WindowState::Restored => WmHintsState::Normal,
});
}
log_x11!(hints.set(conn.as_ref(), id).context("set wm hints"));
// set level
{
let window_type = match self.level {
WindowLevel::AppWindow => atoms._NET_WM_WINDOW_TYPE_NORMAL,
WindowLevel::Tooltip => atoms._NET_WM_WINDOW_TYPE_TOOLTIP,
WindowLevel::Modal => atoms._NET_WM_WINDOW_TYPE_DIALOG,
WindowLevel::DropDown => atoms._NET_WM_WINDOW_TYPE_DROPDOWN_MENU,
};
let conn = self.app.connection();
log_x11!(conn.change_property32(
xproto::PropMode::REPLACE,
id,
atoms._NET_WM_WINDOW_TYPE,
AtomEnum::ATOM,
&[window_type],
));
if matches!(
self.level,
WindowLevel::DropDown | WindowLevel::Modal | WindowLevel::Tooltip
) {
log_x11!(conn.change_window_attributes(
id,
&ChangeWindowAttributesAux::new().override_redirect(1),
));
}
}
let window = Rc::new(Window {
id,
gc,
app: self.app.clone(),
handler,
cairo_surface,
atoms,
area: Cell::new(ScaledArea::from_px(size_px, scale)),
scale: Cell::new(scale),
min_size,
invalid: RefCell::new(Region::EMPTY),
destroyed: Cell::new(false),
timer_queue: Mutex::new(BinaryHeap::new()),
idle_queue: Arc::new(Mutex::new(Vec::new())),
idle_pipe: self.app.idle_pipe(),
present_data: RefCell::new(present_data),
buffers,
active_text_field: Cell::new(None),
});
window.set_title(&self.title);
if let Some(pos) = self.position {
window.set_position(pos);
}
let handle = WindowHandle::new(id, Rc::downgrade(&window));
window.connect(handle.clone())?;
self.app.add_window(id, window)?;
Ok(handle)
}
fn initialize_present_data(&self, window_id: u32) -> Result<PresentData, Error> {
if self.app.present_opcode().is_some() {
let conn = self.app.connection();
// We use the CompleteNotify events to schedule the next frame, and the IdleNotify
// events to manage our buffers.
let id = conn.generate_id()?;
use x11rb::protocol::present::EventMask;
conn.present_select_input(
id,
window_id,
EventMask::COMPLETE_NOTIFY | EventMask::IDLE_NOTIFY,
)?
.check()
.context("set present event mask")?;
let region_id = conn.generate_id()?;
conn.xfixes_create_region(region_id, &[])
.context("create region")?;
Ok(PresentData {
serial: 0,
region: region_id,
waiting_on: None,
needs_present: false,
last_msc: None,
last_ust: None,
})
} else {
Err(anyhow!("no present opcode"))
}
}
}
/// An X11 window.
//
// We use lots of RefCells here, so to avoid panics we need some rules. The basic observation is
// that there are two ways we can end up calling the code in this file:
//
// 1) it either comes from the system (e.g. through some X11 event), or
// 2) from the client (e.g. druid, calling a method on its `WindowHandle`).
//
// Note that 2 only ever happens as a result of 1 (i.e., the system calls us, we call the client
// using the `WinHandler`, and it calls us back). The rules are:
//
// a) We never call into the system as a result of 2. As a consequence, we never get 1
// re-entrantly.
// b) We *almost* never call into the `WinHandler` while holding any of the other RefCells. There's
// an exception for `paint`. This is enforced by the `with_handler` method.
// (TODO: we could try to encode this exception statically, by making the data accessible in
// case 2 smaller than the data accessible in case 1).
pub(crate) struct Window {
id: u32,
gc: Gcontext,
app: Application,
handler: RefCell<Box<dyn WinHandler>>,
cairo_surface: RefCell<XCBSurface>,
atoms: WindowAtoms,
area: Cell<ScaledArea>,
scale: Cell<Scale>,
// min size in px
min_size: Size,
    /// We've told X11 to destroy this window, so don't do any more X requests with this window id.
destroyed: Cell<bool>,
/// The region that was invalidated since the last time we rendered.
invalid: RefCell<Region>,
/// Timers, sorted by "earliest deadline first"
timer_queue: Mutex<BinaryHeap<Timer>>,
idle_queue: Arc<Mutex<Vec<IdleKind>>>,
// Writing to this wakes up the event loop, so that it can run idle handlers.
idle_pipe: RawFd,
/// When this is `Some(_)`, we use the X11 Present extension to present windows. This syncs all
/// presentation to vblank and it appears to prevent tearing (subject to various caveats
/// regarding broken video drivers).
///
/// The Present extension works roughly like this: we submit a pixmap for presentation. It will
/// get drawn at the next vblank, and some time shortly after that we'll get a notification
/// that the drawing was completed.
///
/// There are three ways that rendering can get triggered:
/// 1) We render a frame, and it signals to us that an animation is requested. In this case, we
/// will render the next frame as soon as we get a notification that the just-presented
/// frame completed. In other words, we use `CompleteNotifyEvent` to schedule rendering.
/// 2) We get an expose event telling us that a region got invalidated. In
/// this case, we will render the next frame immediately unless we're already waiting for a
/// completion notification. (If we are waiting for a completion notification, we just make
/// a note to schedule a new frame once we get it.)
/// 3) Someone calls `invalidate` or `invalidate_rect` on us. We schedule ourselves to repaint
/// in the idle loop. This is better than rendering straight away, because for example they
/// might have called `invalidate` from their paint callback, and then we'd end up painting
    ///    re-entrantly.
///
/// This is probably not the best (or at least, not the lowest-latency) scheme we can come up
/// with, because invalidations that happen shortly after a vblank might need to wait 2 frames
/// before they appear. If we're getting lots of invalidations, it might be better to render more
/// than once per frame. Note that if we do, it will require some changes to part 1) above,
/// because if we render twice in a frame then we will get two completion notifications in a
/// row, so we don't want to present on both of them. The `msc` field of the completion
/// notification might be useful here, because it allows us to check how many frames have
/// actually been presented.
present_data: RefCell<Option<PresentData>>,
buffers: RefCell<Buffers>,
active_text_field: Cell<Option<TextFieldToken>>,
}
// This creates a `struct WindowAtoms` containing the specified atoms as members (along with some
// convenience methods to intern and query those atoms). We use the following atoms:
//
// WM_PROTOCOLS
//
// List of atoms that identify the communications protocols between
// the client and window manager in which the client is willing to participate.
//
// https://www.x.org/releases/X11R7.6/doc/xorg-docs/specs/ICCCM/icccm.html#wm_protocols_property
//
// WM_DELETE_WINDOW
//
// Including this atom in the WM_PROTOCOLS property on each window makes sure that
// if the window manager respects WM_DELETE_WINDOW it will send us the event.
//
// The WM_DELETE_WINDOW event is sent when there is a request to close the window.
// Registering for but ignoring this event means that the window will remain open.
//
// https://www.x.org/releases/X11R7.6/doc/xorg-docs/specs/ICCCM/icccm.html#window_deletion
//
// _NET_WM_PID
//
// A property containing the PID of the process that created the window.
//
// https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html#idm45805407915360
//
// _NET_WM_NAME
//
// A version of WM_NAME supporting UTF8 text.
//
// https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html#idm45805407982336
//
// UTF8_STRING
//
// The type of _NET_WM_NAME
atom_manager! {
WindowAtoms: WindowAtomsCookie {
WM_PROTOCOLS,
WM_DELETE_WINDOW,
_NET_WM_PID,
_NET_WM_NAME,
UTF8_STRING,
_NET_WM_WINDOW_TYPE,
_NET_WM_WINDOW_TYPE_NORMAL,
_NET_WM_WINDOW_TYPE_DROPDOWN_MENU,
_NET_WM_WINDOW_TYPE_TOOLTIP,
_NET_WM_WINDOW_TYPE_DIALOG,
}
}
/// A collection of pixmaps for rendering to. This gets used in two different ways: if the present
/// extension is enabled, we render to a pixmap and then present it. If the present extension is
/// disabled, we render to a pixmap and then call `copy_area` on it (this probably isn't the best
/// way to imitate double buffering, but it's the fallback anyway).
struct Buffers {
/// A list of idle pixmaps. We take a pixmap from here for rendering to.
///
/// When we're not using the present extension, all pixmaps belong in here; as soon as we copy
/// from one, we can use it again.
///
/// When we submit a pixmap to present, we're not allowed to touch it again until we get a
/// corresponding IDLE_NOTIFY event. In my limited experiments this happens shortly after
/// vsync, meaning that we may want to start rendering the next pixmap before we get the old
/// one back. Therefore, we keep a list of pixmaps. We pop one each time we render, and push
/// one when we get IDLE_NOTIFY.
///
    /// Since the current code only renders at most once per vsync, two pixmaps seem to always be
/// enough. Nevertheless, we will allocate more on the fly if we need them. Note that rendering
/// more than once per vsync can only improve latency, because only the most recently-presented
    /// pixmap will actually be displayed.
idle_pixmaps: Vec<Pixmap>,
/// A list of all the allocated pixmaps (including the idle ones).
all_pixmaps: Vec<Pixmap>,
/// The sizes of the pixmaps (they all have the same size). In order to avoid repeatedly
/// reallocating as the window size changes, we allow these to be bigger than the window.
width: u16,
height: u16,
/// The depth of the currently allocated pixmaps.
depth: u8,
}
/// The state involved in using X's [Present] extension.
///
/// [Present]: https://cgit.freedesktop.org/xorg/proto/presentproto/tree/presentproto.txt
#[derive(Debug)]
struct PresentData {
/// A monotonically increasing present request counter.
serial: u32,
/// The region that we use for telling X what to present.
region: XRegion,
/// Did we submit a present that hasn't completed yet? If so, this is its serial number.
waiting_on: Option<u32>,
/// We need to render another frame as soon as the current one is done presenting.
needs_present: bool,
/// The last MSC (media stream counter) that was completed. This can be used to diagnose
/// latency problems, because MSC is a frame counter: it increments once per frame. We should
/// be presenting on every frame, and storing the last completed MSC lets us know if we missed
/// one.
last_msc: Option<u64>,
/// The time at which the last frame was completed. The present protocol documentation doesn't
/// define the units, but it appears to be in microseconds.
last_ust: Option<u64>,
}
#[derive(Clone, PartialEq)]
pub struct CustomCursor(xproto::Cursor);
impl Window {
#[track_caller]
fn with_handler<T, F: FnOnce(&mut dyn WinHandler) -> T>(&self, f: F) -> Option<T> {
if self.cairo_surface.try_borrow_mut().is_err()
|| self.invalid.try_borrow_mut().is_err()
|| self.present_data.try_borrow_mut().is_err()
|| self.buffers.try_borrow_mut().is_err()
{
error!("other RefCells were borrowed when calling into the handler");
return None;
}
self.with_handler_and_dont_check_the_other_borrows(f)
}
#[track_caller]
fn with_handler_and_dont_check_the_other_borrows<T, F: FnOnce(&mut dyn WinHandler) -> T>(
&self,
f: F,
) -> Option<T> {
match self.handler.try_borrow_mut() {
Ok(mut h) => Some(f(&mut **h)),
Err(_) => {
error!("failed to borrow WinHandler at {}", Location::caller());
None
}
}
}
fn connect(&self, handle: WindowHandle) -> Result<(), Error> {
let size = self.size().size_dp();
let scale = self.scale.get();
self.with_handler(|h| {
h.connect(&handle.into());
h.scale(scale);
h.size(size);
});
Ok(())
}
/// Start the destruction of the window.
pub fn destroy(&self) {
if !self.destroyed() {
self.destroyed.set(true);
log_x11!(self.app.connection().destroy_window(self.id));
}
}
fn destroyed(&self) -> bool {
self.destroyed.get()
}
fn size(&self) -> ScaledArea {
self.area.get()
}
// note: size is in px
fn size_changed(&self, size: Size) -> Result<(), Error> {
let scale = self.scale.get();
let new_size = {
if size != self.area.get().size_px() {
self.area.set(ScaledArea::from_px(size, scale));
true
} else {
false
}
};
if new_size {
borrow_mut!(self.buffers)?.set_size(
self.app.connection(),
self.id,
size.width as u16,
size.height as u16,
);
borrow_mut!(self.cairo_surface)?
.set_size(size.width as i32, size.height as i32)
.map_err(|status| {
anyhow!(
"Failed to update cairo surface size to {:?}: {}",
size,
status
)
})?;
self.add_invalid_rect(size.to_dp(scale).to_rect())?;
self.with_handler(|h| h.size(size.to_dp(scale)));
self.with_handler(|h| h.scale(scale));
}
Ok(())
}
// Ensure that our cairo context is targeting the right drawable, allocating one if necessary.
fn update_cairo_surface(&self) -> Result<(), Error> {
let mut buffers = borrow_mut!(self.buffers)?;
let pixmap = if let Some(p) = buffers.idle_pixmaps.last() {
*p
} else {
info!("ran out of idle pixmaps, creating a new one");
buffers.create_pixmap(self.app.connection(), self.id)?
};
let drawable = XCBDrawable(pixmap);
borrow_mut!(self.cairo_surface)?
.set_drawable(&drawable, buffers.width as i32, buffers.height as i32)
.map_err(|e| anyhow!("Failed to update cairo drawable: {}", e))?;
Ok(())
}
fn render(&self) -> Result<(), Error> {
self.with_handler(|h| h.prepare_paint());
if self.destroyed() {
return Ok(());
}
self.update_cairo_surface()?;
let invalid = std::mem::replace(&mut *borrow_mut!(self.invalid)?, Region::EMPTY);
{
let surface = borrow!(self.cairo_surface)?;
let cairo_ctx = cairo::Context::new(&surface);
let scale = self.scale.get();
for rect in invalid.rects() {
let rect = rect.to_px(scale);
cairo_ctx.rectangle(rect.x0, rect.y0, rect.width(), rect.height());
}
cairo_ctx.clip();
cairo_ctx.scale(scale.x(), scale.y());
let mut piet_ctx = Piet::new(&cairo_ctx);
// We need to be careful with earlier returns here, because piet_ctx
// can panic if it isn't finish()ed. Also, we want to reset cairo's clip
// even on error.
//
// Note that we're borrowing the surface while calling the handler. This is ok, because
// we don't return control to the system or re-borrow the surface from any code that
// the client can call.
let result = self.with_handler_and_dont_check_the_other_borrows(|handler| {
handler.paint(&mut piet_ctx, &invalid);
piet_ctx
.finish()
.map_err(|e| anyhow!("Window::render - piet finish failed: {}", e))
});
let err = match result {
None => {
// The handler borrow failed, so finish didn't get called.
piet_ctx
.finish()
.map_err(|e| anyhow!("Window::render - piet finish failed: {}", e))
}
Some(e) => {
// Finish might have errored, in which case we want to propagate it.
e
}
};
cairo_ctx.reset_clip();
err?;
}
self.set_needs_present(false)?;
let mut buffers = borrow_mut!(self.buffers)?;
let pixmap = *buffers
.idle_pixmaps
.last()
.ok_or_else(|| anyhow!("after rendering, no pixmap to present"))?;
let scale = self.scale.get();
if let Some(present) = borrow_mut!(self.present_data)?.as_mut() {
present.present(self.app.connection(), pixmap, self.id, &invalid, scale)?;
buffers.idle_pixmaps.pop();
} else {
for rect in invalid.rects() {
let rect = rect.to_px(scale).expand();
let (x, y) = (rect.x0 as i16, rect.y0 as i16);
let (w, h) = (rect.width() as u16, rect.height() as u16);
self.app
.connection()
.copy_area(pixmap, self.id, self.gc, x, y, x, y, w, h)?;
}
}
Ok(())
}
fn show(&self) {
if !self.destroyed() {
log_x11!(self.app.connection().map_window(self.id));
}
}
fn close(&self) {
self.destroy();
}
/// Set whether the window should be resizable
fn resizable(&self, resizable: bool) {
let conn = self.app.connection().as_ref();
log_x11!(size_hints(resizable, self.size().size_px(), self.min_size)
.set_normal_hints(conn, self.id)
.context("set normal hints"));
}
/// Set whether the window should show titlebar
fn show_titlebar(&self, _show_titlebar: bool) {
warn!("Window::show_titlebar is currently unimplemented for X11 backend.");
}
fn get_position(&self) -> Point {
fn _get_position(window: &Window) -> Result<Point, Error> {
let conn = window.app.connection();
let scale = window.scale.get();
let geom = conn.get_geometry(window.id)?.reply()?;
let cord = conn
.translate_coordinates(window.id, geom.root, 0, 0)?
.reply()?;
Ok(Point::new(cord.dst_x as _, cord.dst_y as _).to_dp(scale))
}
let pos = _get_position(self);
log_x11!(&pos);
pos.unwrap_or_default()
}
fn set_position(&self, pos: Point) {
let conn = self.app.connection();
let scale = self.scale.get();
let pos = pos.to_px(scale).expand();
log_x11!(conn.configure_window(
self.id,
&ConfigureWindowAux::new().x(pos.x as i32).y(pos.y as i32),
));
}
fn set_size(&self, size: Size) {
let conn = self.app.connection();
let scale = self.scale.get();
let size = size.to_px(scale).expand();
log_x11!(conn.configure_window(
self.id,
&ConfigureWindowAux::new()
.width(size.width as u32)
.height(size.height as u32),
));
}
/// Bring this window to the front of the window stack and give it focus.
fn bring_to_front_and_focus(&self) {
if self.destroyed() {
return;
}
// TODO(x11/misc): Unsure if this does exactly what the doc comment says; need a test case.
let conn = self.app.connection();
log_x11!(conn.configure_window(
self.id,
&xproto::ConfigureWindowAux::new().stack_mode(xproto::StackMode::ABOVE),
));
log_x11!(conn.set_input_focus(
xproto::InputFocus::POINTER_ROOT,
self.id,
xproto::Time::CURRENT_TIME,
));
}
fn add_invalid_rect(&self, rect: Rect) -> Result<(), Error> {
        // Expanding is not needed here, because we expand at every use of `invalid`.
borrow_mut!(self.invalid)?.add_rect(rect);
Ok(())
}
/// Redraw more-or-less now.
///
/// "More-or-less" because if we're already waiting on a present, we defer the drawing until it
/// completes.
fn redraw_now(&self) -> Result<(), Error> {
if self.waiting_on_present()? {
self.set_needs_present(true)?;
} else {
self.render()?;
}
Ok(())
}
/// Schedule a redraw on the idle loop, or if we are waiting on present then schedule it for
/// when the current present finishes.
fn request_anim_frame(&self) {
if let Ok(true) = self.waiting_on_present() {
if let Err(e) = self.set_needs_present(true) {
error!(
"Window::request_anim_frame - failed to schedule present: {}",
e
);
}
} else {
let idle = IdleHandle {
queue: Arc::clone(&self.idle_queue),
pipe: self.idle_pipe,
};
idle.schedule_redraw();
}
}
fn invalidate(&self) {
let rect = self.size().size_dp().to_rect();
self.add_invalid_rect(rect)
.unwrap_or_else(|err| error!("Window::invalidate - failed to invalidate: {}", err));
self.request_anim_frame();
}
fn invalidate_rect(&self, rect: Rect) {
if let Err(err) = self.add_invalid_rect(rect) {
error!("Window::invalidate_rect - failed to enlarge rect: {}", err);
}
self.request_anim_frame();
}
fn set_title(&self, title: &str) {
if self.destroyed() {
return;
}
// This is technically incorrect. STRING encoding is *not* UTF8. However, I am not sure
// what it really is. WM_LOCALE_NAME might be involved. Hopefully, nothing cares about this
// as long as _NET_WM_NAME is also set (which uses UTF8).
log_x11!(self.app.connection().change_property8(
xproto::PropMode::REPLACE,
self.id,
AtomEnum::WM_NAME,
AtomEnum::STRING,
title.as_bytes(),
));
log_x11!(self.app.connection().change_property8(
xproto::PropMode::REPLACE,
self.id,
self.atoms._NET_WM_NAME,
self.atoms.UTF8_STRING,
title.as_bytes(),
));
}
fn set_cursor(&self, cursor: &Cursor) {
let cursors = &self.app.cursors;
#[allow(deprecated)]
let cursor = match cursor {
Cursor::Arrow => cursors.default,
Cursor::IBeam => cursors.text,
Cursor::Pointer => cursors.pointer,
Cursor::Crosshair => cursors.crosshair,
Cursor::OpenHand => {
warn!("Cursor::OpenHand not supported for x11 backend. using arrow cursor");
None
}
Cursor::NotAllowed => cursors.not_allowed,
Cursor::ResizeLeftRight => cursors.col_resize,
Cursor::ResizeUpDown => cursors.row_resize,
Cursor::Custom(custom) => Some(custom.0),
};
if cursor.is_none() {
warn!("Unable to load cursor {:?}", cursor);
return;
}
let conn = self.app.connection();
let changes = ChangeWindowAttributesAux::new().cursor(cursor);
if let Err(e) = conn.change_window_attributes(self.id, &changes) {
error!("Changing cursor window attribute failed {}", e);
};
}
fn set_menu(&self, _menu: Menu) {
// TODO(x11/menus): implement Window::set_menu (currently a no-op)
}
fn get_scale(&self) -> Result<Scale, Error> {
Ok(self.scale.get())
}
pub fn handle_expose(&self, expose: &xproto::ExposeEvent) -> Result<(), Error> {
let rect = Rect::from_origin_size(
(expose.x as f64, expose.y as f64),
(expose.width as f64, expose.height as f64),
)
.to_dp(self.scale.get());
self.add_invalid_rect(rect)?;
if self.waiting_on_present()? {
self.set_needs_present(true)?;
} else if expose.count == 0 {
self.request_anim_frame();
}
Ok(())
}
pub fn handle_key_press(&self, key_press: &xproto::KeyPressEvent) {
let hw_keycode = key_press.detail;
let code = keycodes::hardware_keycode_to_code(hw_keycode);
let mods = key_mods(key_press.state);
let key = keycodes::code_to_key(code, mods);
let location = keycodes::code_to_location(code);
let state = KeyState::Down;
let key_event = KeyEvent {
code,
key,
mods,
location,
state,
repeat: false,
is_composing: false,
};
self.with_handler(|h| {
if !h.key_down(key_event.clone()) {
simulate_input(h, self.active_text_field.get(), key_event);
}
});
}
pub fn handle_button_press(
&self,
button_press: &xproto::ButtonPressEvent,
) -> Result<(), Error> {
let button = mouse_button(button_press.detail);
let scale = self.scale.get();
let mouse_event = MouseEvent {
pos: Point::new(button_press.event_x as f64, button_press.event_y as f64).to_dp(scale),
// The xcb state field doesn't include the newly pressed button, but
// druid wants it to be included.
buttons: mouse_buttons(button_press.state).with(button),
mods: key_mods(button_press.state),
// TODO: detect the count
count: 1,
focus: false,
button,
wheel_delta: Vec2::ZERO,
};
self.with_handler(|h| h.mouse_down(&mouse_event));
Ok(())
}
pub fn handle_button_release(
&self,
button_release: &xproto::ButtonReleaseEvent,
) -> Result<(), Error> {
let scale = self.scale.get();
let button = mouse_button(button_release.detail);
let mouse_event = MouseEvent {
pos: Point::new(button_release.event_x as f64, button_release.event_y as f64)
.to_dp(scale),
// The xcb state includes the newly released button, but druid
// doesn't want it.
buttons: mouse_buttons(button_release.state).without(button),
mods: key_mods(button_release.state),
count: 0,
focus: false,
button,
wheel_delta: Vec2::ZERO,
};
self.with_handler(|h| h.mouse_up(&mouse_event));
Ok(())
}
pub fn handle_wheel(&self, event: &xproto::ButtonPressEvent) -> Result<(), Error> {
let button = event.detail;
let mods = key_mods(event.state);
let scale = self.scale.get();
// We use a delta of 120 per tick to match the behavior of Windows.
let is_shift = mods.shift();
let delta = match button {
4 if is_shift => (-120.0, 0.0),
4 => (0.0, -120.0),
5 if is_shift => (120.0, 0.0),
5 => (0.0, 120.0),
6 => (-120.0, 0.0),
7 => (120.0, 0.0),
_ => return Err(anyhow!("unexpected mouse wheel button: {}", button)),
};
let mouse_event = MouseEvent {
pos: Point::new(event.event_x as f64, event.event_y as f64).to_dp(scale),
buttons: mouse_buttons(event.state),
mods: key_mods(event.state),
count: 0,
focus: false,
button: MouseButton::None,
wheel_delta: delta.into(),
};
self.with_handler(|h| h.wheel(&mouse_event));
Ok(())
}
pub fn handle_motion_notify(
&self,
motion_notify: &xproto::MotionNotifyEvent,
) -> Result<(), Error> {
let scale = self.scale.get();
let mouse_event = MouseEvent {
pos: Point::new(motion_notify.event_x as f64, motion_notify.event_y as f64)
.to_dp(scale),
buttons: mouse_buttons(motion_notify.state),
mods: key_mods(motion_notify.state),
count: 0,
focus: false,
button: MouseButton::None,
wheel_delta: Vec2::ZERO,
};
self.with_handler(|h| h.mouse_move(&mouse_event));
Ok(())
}
pub fn handle_client_message(&self, client_message: &xproto::ClientMessageEvent) {
// https://www.x.org/releases/X11R7.7/doc/libX11/libX11/libX11.html#id2745388
// https://www.x.org/releases/X11R7.6/doc/xorg-docs/specs/ICCCM/icccm.html#window_deletion
if client_message.type_ == self.atoms.WM_PROTOCOLS && client_message.format == 32 {
let protocol = client_message.data.as_data32()[0];
if protocol == self.atoms.WM_DELETE_WINDOW {
self.with_handler(|h| h.request_close());
}
}
}
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn handle_destroy_notify(&self, _destroy_notify: &xproto::DestroyNotifyEvent) {
self.with_handler(|h| h.destroy());
}
pub fn handle_configure_notify(&self, event: &ConfigureNotifyEvent) -> Result<(), Error> {
self.size_changed(Size::new(event.width as f64, event.height as f64))
}
pub fn handle_complete_notify(&self, event: &CompleteNotifyEvent) -> Result<(), Error> {
if let Some(present) = borrow_mut!(self.present_data)?.as_mut() {
// A little sanity check (which isn't worth an early return): we should only have
// one present request in flight, so we should only get notified about the request
// that we're waiting for.
if present.waiting_on != Some(event.serial) {
warn!(
"Got a notify for serial {}, but waiting on {:?}",
event.serial, present.waiting_on
);
}
// Check whether we missed presenting on any frames.
if let Some(last_msc) = present.last_msc {
if last_msc.wrapping_add(1) != event.msc {
tracing::debug!(
"missed a present: msc went from {} to {}",
last_msc,
event.msc
);
if let Some(last_ust) = present.last_ust {
tracing::debug!("ust went from {} to {}", last_ust, event.ust);
}
}
}
// Only store the last MSC if we're animating (if we aren't animating, missed MSCs
// aren't interesting).
present.last_msc = if present.needs_present {
Some(event.msc)
} else {
None
};
present.last_ust = Some(event.ust);
present.waiting_on = None;
}
if self.needs_present()? {
self.render()?;
}
Ok(())
}
pub fn handle_idle_notify(&self, event: &IdleNotifyEvent) -> Result<(), Error> {
if self.destroyed() {
return Ok(());
}
let mut buffers = borrow_mut!(self.buffers)?;
if buffers.all_pixmaps.contains(&event.pixmap) {
buffers.idle_pixmaps.push(event.pixmap);
} else {
// We must have reallocated the buffers while this pixmap was busy, so free it now.
// Regular freeing happens in `Buffers::free_pixmaps`.
self.app.connection().free_pixmap(event.pixmap)?;
}
Ok(())
}
fn waiting_on_present(&self) -> Result<bool, Error> {
Ok(borrow!(self.present_data)?
.as_ref()
.map(|p| p.waiting_on.is_some())
.unwrap_or(false))
}
fn set_needs_present(&self, val: bool) -> Result<(), Error> {
if let Some(present) = borrow_mut!(self.present_data)?.as_mut() {
present.needs_present = val;
}
Ok(())
}
fn needs_present(&self) -> Result<bool, Error> {
Ok(borrow!(self.present_data)?
.as_ref()
.map(|p| p.needs_present)
.unwrap_or(false))
}
pub(crate) fn run_idle(&self) {
let mut queue = Vec::new();
std::mem::swap(&mut *self.idle_queue.lock().unwrap(), &mut queue);
let mut needs_redraw = false;
self.with_handler(|handler| {
for callback in queue {
match callback {
IdleKind::Callback(f) => {
f.call(handler);
}
IdleKind::Token(tok) => {
handler.idle(tok);
}
IdleKind::Redraw => {
needs_redraw = true;
}
}
}
});
if needs_redraw {
if let Err(e) = self.redraw_now() {
error!("Error redrawing: {}", e);
}
}
}
pub(crate) fn next_timeout(&self) -> Option<Instant> {
self.timer_queue
.lock()
.unwrap()
.peek()
.map(|timer| timer.deadline())
}
pub(crate) fn run_timers(&self, now: Instant) {
while let Some(deadline) = self.next_timeout() {
if deadline > now {
break;
}
// Remove the timer and get the token
let token = self.timer_queue.lock().unwrap().pop().unwrap().token();
self.with_handler(|h| h.timer(token));
}
}
}
impl Buffers {
fn new(
conn: &Rc<XCBConnection>,
window_id: u32,
buf_count: usize,
width: u16,
height: u16,
depth: u8,
) -> Result<Buffers, Error> {
let mut ret = Buffers {
width,
height,
depth,
idle_pixmaps: Vec::new(),
all_pixmaps: Vec::new(),
};
ret.create_pixmaps(conn, window_id, buf_count)?;
Ok(ret)
}
/// Frees all the X pixmaps that we hold.
fn free_pixmaps(&mut self, conn: &Rc<XCBConnection>) {
// We can't touch pixmaps if the present extension is waiting on them, so only free the
// idle ones. We'll free the busy ones when we get notified that they're idle in `Window::handle_idle_notify`.
for &p in &self.idle_pixmaps {
log_x11!(conn.free_pixmap(p));
}
self.all_pixmaps.clear();
self.idle_pixmaps.clear();
}
fn set_size(&mut self, conn: &Rc<XCBConnection>, window_id: u32, width: u16, height: u16) {
// How big should the buffer be if we want at least x pixels? Rounding up to the next power
// of 2 has the potential to waste 75% of our memory (factor 2 in both directions), so
// instead we round up to the nearest number of the form 2^k or 3 * 2^k.
fn next_size(x: u16) -> u16 {
// We round up to the nearest multiple of `accuracy`, which is between x/2 and x/4.
            // Don't bother rounding to anything smaller than 32 = 2^(7-2).
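            // e.g. next_size(300) = 384 = 3 * 2^7, and next_size(500) = 512 = 2^9.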
let accuracy = 1 << ((16 - x.leading_zeros()).max(7) - 2);
let mask = accuracy - 1;
(x + mask) & !mask
}
let width = next_size(width);
let height = next_size(height);
if (width, height) != (self.width, self.height) {
let count = self.all_pixmaps.len();
self.free_pixmaps(conn);
self.width = width;
self.height = height;
log_x11!(self.create_pixmaps(conn, window_id, count));
}
}
/// Creates a new pixmap for rendering to. The new pixmap will be first in line for rendering.
fn create_pixmap(&mut self, conn: &Rc<XCBConnection>, window_id: u32) -> Result<Pixmap, Error> {
let pixmap_id = conn.generate_id()?;
conn.create_pixmap(self.depth, pixmap_id, window_id, self.width, self.height)?;
self.all_pixmaps.push(pixmap_id);
self.idle_pixmaps.push(pixmap_id);
Ok(pixmap_id)
}
fn create_pixmaps(
&mut self,
conn: &Rc<XCBConnection>,
window_id: u32,
count: usize,
) -> Result<(), Error> {
if !self.all_pixmaps.is_empty() {
self.free_pixmaps(conn);
}
for _ in 0..count {
self.create_pixmap(conn, window_id)?;
}
Ok(())
}
}
impl PresentData {
// We have already rendered into the active pixmap buffer. Present it to the
// X server, and then rotate the buffers.
fn present(
&mut self,
conn: &Rc<XCBConnection>,
pixmap: Pixmap,
window_id: u32,
region: &Region,
scale: Scale,
) -> Result<(), Error> {
let x_rects: Vec<Rectangle> = region
.rects()
.iter()
.map(|r| {
let r = r.to_px(scale).expand();
Rectangle {
x: r.x0 as i16,
y: r.y0 as i16,
width: r.width() as u16,
height: r.height() as u16,
}
})
.collect();
conn.xfixes_set_region(self.region, &x_rects[..])?;
conn.present_pixmap(
window_id,
pixmap,
self.serial,
// valid region of the pixmap
self.region,
// region of the window that must get updated
self.region,
// window-relative x-offset of the pixmap
0,
// window-relative y-offset of the pixmap
0,
// target CRTC
x11rb::NONE,
// wait fence
x11rb::NONE,
// idle fence
x11rb::NONE,
// present options
x11rb::protocol::present::Option::NONE.into(),
// target msc (0 means present at the next time that msc % divisor == remainder)
0,
// divisor
1,
// remainder
0,
// notifies
&[],
)?;
self.waiting_on = Some(self.serial);
self.serial += 1;
Ok(())
}
}
// Converts from, e.g., the `details` field of `xcb::xproto::ButtonPressEvent`
fn mouse_button(button: u8) -> MouseButton {
match button {
1 => MouseButton::Left,
2 => MouseButton::Middle,
3 => MouseButton::Right,
// buttons 4 through 7 are for scrolling.
4..=7 => MouseButton::None,
8 => MouseButton::X1,
9 => MouseButton::X2,
_ => {
warn!("unknown mouse button code {}", button);
MouseButton::None
}
}
}
// Extracts the mouse buttons from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn mouse_buttons(mods: u16) -> MouseButtons {
let mut buttons = MouseButtons::new();
let button_masks = &[
(xproto::ButtonMask::M1, MouseButton::Left),
(xproto::ButtonMask::M2, MouseButton::Middle),
(xproto::ButtonMask::M3, MouseButton::Right),
// TODO: determine the X1/X2 state, using our own caching if necessary.
// BUTTON_MASK_4/5 do not work: they are for scroll events.
];
for (mask, button) in button_masks {
if mods & u16::from(*mask) != 0 {
buttons.insert(*button);
}
}
buttons
}
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::ModMask::SHIFT, Modifiers::SHIFT),
(xproto::ModMask::CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::ModMask::M1, Modifiers::ALT),
(xproto::ModMask::M2, Modifiers::NUM_LOCK),
(xproto::ModMask::M4, Modifiers::META),
(xproto::ModMask::LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & u16::from(*mask) != 0 {
ret |= *modifiers;
}
}
ret
}
/// A handle that can get used to schedule an idle handler. Note that
/// this handle can be cloned and sent between threads.
#[derive(Clone)]
pub struct IdleHandle {
queue: Arc<Mutex<Vec<IdleKind>>>,
pipe: RawFd,
}
pub(crate) enum IdleKind {
Callback(Box<dyn IdleCallback>),
Token(IdleToken),
Redraw,
}
impl IdleHandle {
fn wake(&self) {
loop {
match nix::unistd::write(self.pipe, &[0]) {
Err(nix::Error::Sys(nix::errno::Errno::EINTR)) => {}
Err(nix::Error::Sys(nix::errno::Errno::EAGAIN)) => {}
Err(e) => {
error!("Failed to write to idle pipe: {}", e);
break;
}
Ok(_) => {
break;
}
}
}
}
pub(crate) fn schedule_redraw(&self) {
self.queue.lock().unwrap().push(IdleKind::Redraw);
self.wake();
}
pub fn add_idle_callback<F>(&self, callback: F)
where
F: FnOnce(&mut dyn WinHandler) + Send + 'static,
{
self.queue
.lock()
.unwrap()
.push(IdleKind::Callback(Box::new(callback)));
self.wake();
}
pub fn add_idle_token(&self, token: IdleToken) {
self.queue.lock().unwrap().push(IdleKind::Token(token));
self.wake();
}
}
#[derive(Clone, Default)]
pub(crate) struct WindowHandle {
id: u32,
window: Weak<Window>,
}
impl WindowHandle {
fn new(id: u32, window: Weak<Window>) -> WindowHandle {
WindowHandle { id, window }
}
pub fn show(&self) {
if let Some(w) = self.window.upgrade() {
w.show();
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn close(&self) {
if let Some(w) = self.window.upgrade() {
w.close();
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn resizable(&self, resizable: bool) {
if let Some(w) = self.window.upgrade() {
w.resizable(resizable);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn show_titlebar(&self, show_titlebar: bool) {
if let Some(w) = self.window.upgrade() {
w.show_titlebar(show_titlebar);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn set_position(&self, position: Point) {
if let Some(w) = self.window.upgrade() {
w.set_position(position);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn get_position(&self) -> Point {
if let Some(w) = self.window.upgrade() {
w.get_position()
} else {
error!("Window {} has already been dropped", self.id);
Point::new(0.0, 0.0)
}
}
pub fn content_insets(&self) -> Insets {
warn!("WindowHandle::content_insets unimplemented for X11 backend.");
Insets::ZERO
}
pub fn set_level(&self, _level: WindowLevel) {
warn!("WindowHandle::set_level unimplemented for X11 backend.");
}
pub fn set_size(&self, size: Size) {
if let Some(w) = self.window.upgrade() {
w.set_size(size);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn get_size(&self) -> Size {
if let Some(w) = self.window.upgrade() {
w.size().size_dp()
} else {
error!("Window {} has already been dropped", self.id);
Size::ZERO
}
}
pub fn set_window_state(&self, _state: window::WindowState) {
warn!("WindowHandle::set_window_state is currently unimplemented for X11 backend.");
}
pub fn get_window_state(&self) -> window::WindowState {
warn!("WindowHandle::get_window_state is currently unimplemented for X11 backend.");
window::WindowState::Restored
}
pub fn handle_titlebar(&self, _val: bool) {
warn!("WindowHandle::handle_titlebar is currently unimplemented for X11 backend.");
}
pub fn bring_to_front_and_focus(&self) {
if let Some(w) = self.window.upgrade() {
w.bring_to_front_and_focus();
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn request_anim_frame(&self) {
if let Some(w) = self.window.upgrade() {
w.request_anim_frame();
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn invalidate(&self) {
if let Some(w) = self.window.upgrade() {
w.invalidate();
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn invalidate_rect(&self, rect: Rect) {
if let Some(w) = self.window.upgrade() {
w.invalidate_rect(rect);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn set_title(&self, title: &str) {
if let Some(w) = self.window.upgrade() {
w.set_title(title);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn set_menu(&self, menu: Menu) {
if let Some(w) = self.window.upgrade() {
w.set_menu(menu);
} else {
error!("Window {} has already been dropped", self.id);
}
}
pub fn text(&self) -> PietText {
PietText::new()
}
pub fn add_text_field(&self) -> TextFieldToken {
TextFieldToken::next()
}
pub fn remove_text_field(&self, token: TextFieldToken) {
if let Some(window) = self.window.upgrade() {
if window.active_text_field.get() == Some(token) {
window.active_text_field.set(None)
}
}
}
pub fn set_focused_text_field(&self, active_field: Option<TextFieldToken>) {
if let Some(window) = self.window.upgrade() {
window.active_text_field.set(active_field);
}
}
pub fn update_text_field(&self, _token: TextFieldToken, _update: Event) {
// noop until we get a real text input implementation
}
pub fn request_timer(&self, deadline: Instant) -> TimerToken {
if let Some(w) = self.window.upgrade() {
let timer = Timer::new(deadline);
w.timer_queue.lock().unwrap().push(timer);
timer.token()
} else {
TimerToken::INVALID
}
}
pub fn set_cursor(&mut self, cursor: &Cursor) {
if let Some(w) = self.window.upgrade() {
w.set_cursor(cursor);
}
}
pub fn make_cursor(&self, desc: &CursorDesc) -> Option<Cursor> {
if let Some(w) = self.window.upgrade() {
match w.app.render_argb32_pictformat_cursor() {
None => {
warn!("Custom cursors are not supported by the X11 server");
None
}
Some(format) => {
let conn = w.app.connection();
let setup = &conn.setup();
let screen = &setup.roots[w.app.screen_num()];
match make_cursor(&**conn, setup.image_byte_order, screen.root, format, desc) {
// TODO: We 'leak' the cursor - nothing ever calls render_free_cursor
Ok(cursor) => Some(cursor),
Err(err) => {
error!("Failed to create custom cursor: {:?}", err);
None
}
}
}
}
} else {
None
}
}
pub fn open_file(&mut self, _options: FileDialogOptions) -> Option<FileDialogToken> {
// TODO(x11/file_dialogs): implement WindowHandle::open_file
warn!("WindowHandle::open_file is currently unimplemented for X11 backend.");
None
}
pub fn save_as(&mut self, _options: FileDialogOptions) -> Option<FileDialogToken> {
// TODO(x11/file_dialogs): implement WindowHandle::save_as
warn!("WindowHandle::save_as is currently unimplemented for X11 backend.");
None
}
pub fn show_context_menu(&self, _menu: Menu, _pos: Point) {
// TODO(x11/menus): implement WindowHandle::show_context_menu
warn!("WindowHandle::show_context_menu is currently unimplemented for X11 backend.");
}
pub fn get_idle_handle(&self) -> Option<IdleHandle> {
self.window.upgrade().map(|w| IdleHandle {
queue: Arc::clone(&w.idle_queue),
pipe: w.idle_pipe,
})
}
pub fn get_scale(&self) -> Result<Scale, ShellError> {
if let Some(w) = self.window.upgrade() {
Ok(w.get_scale()?)
} else {
error!("Window {} has already been dropped", self.id);
Ok(Scale::new(1.0, 1.0))
}
}
}
#[cfg(feature = "raw-win-handle")]
unsafe impl HasRawWindowHandle for WindowHandle {
fn raw_window_handle(&self) -> RawWindowHandle {
let mut handle = XcbHandle {
window: self.id,
..XcbHandle::empty()
};
if let Some(window) = self.window.upgrade() {
handle.connection = window.app.connection().get_raw_xcb_connection();
} else {
// Documentation for HasRawWindowHandle encourages filling in all fields possible,
// leaving those empty that cannot be derived.
error!("Failed to get XCBConnection, returning incomplete handle");
}
RawWindowHandle::Xcb(handle)
}
}
fn make_cursor(
conn: &XCBConnection,
byte_order: X11ImageOrder,
root_window: u32,
argb32_format: Pictformat,
desc: &CursorDesc,
) -> Result<Cursor, ReplyOrIdError> {
// BEGIN: Lots of code just to get the image into a RENDER Picture
fn multiply_alpha(color: u8, alpha: u8) -> u8 {
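        // Computes round(color * alpha / 255) using the classic 0x80 bias trick,
        // e.g. multiply_alpha(255, 128) == 128.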
let (color, alpha) = (u16::from(color), u16::from(alpha));
let temp = color * alpha + 0x80u16;
((temp + (temp >> 8)) >> 8) as u8
}
// No idea how to sanely get the pixel values, so I'll go with 'insane':
// Iterate over all pixels and build an array
let pixels = desc
.image
.pixel_colors()
.flat_map(|row| {
row.flat_map(|color| {
let (r, g, b, a) = color.as_rgba8();
// RENDER wants premultiplied alpha
let (r, g, b) = (
multiply_alpha(r, a),
multiply_alpha(g, a),
multiply_alpha(b, a),
);
// piet gives us rgba in this order, the server expects an u32 with argb.
let (b0, b1, b2, b3) = match byte_order {
X11ImageOrder::LSB_FIRST => (b, g, r, a),
_ => (a, r, g, b),
};
// TODO Ownership and flat_map don't go well together :-(
vec![b0, b1, b2, b3]
})
})
.collect::<Vec<u8>>();
let width = desc.image.width().try_into().expect("Invalid cursor width");
let height = desc
.image
.height()
.try_into()
.expect("Invalid cursor height");
let pixmap = conn.generate_id()?;
let gc = conn.generate_id()?;
let picture = conn.generate_id()?;
conn.create_pixmap(32, pixmap, root_window, width, height)?;
conn.create_gc(gc, pixmap, &Default::default())?;
conn.put_image(
ImageFormat::Z_PIXMAP,
pixmap,
gc,
width,
height,
0,
0,
0,
32,
&pixels,
)?;
conn.render_create_picture(picture, pixmap, argb32_format, &Default::default())?;
conn.free_gc(gc)?;
conn.free_pixmap(pixmap)?;
    // END: Lots of code just to get the image into a RENDER Picture
let cursor = conn.generate_id()?;
conn.render_create_cursor(cursor, picture, desc.hot.x as u16, desc.hot.y as u16)?;
conn.render_free_picture(picture)?;
Ok(Cursor::Custom(CustomCursor(cursor)))
}
| 34.986018 | 118 | 0.564744 |
5d0ca9f71c3f17fe524189f7dc95efa2005265d8 | 625 | use nom::{error::VerboseError, number::complete as number};
pub fn parse_u8(part: &str) -> Result<u8, nom::Err<VerboseError<&str>>> {
let (_, part) = number::double(part)?;
Ok(part as u8)
}
pub fn parse_u16(part: &str) -> Result<u16, nom::Err<VerboseError<&str>>> {
let (_, part) = number::double(part)?;
Ok(part as u16)
}
pub fn parse_u32(part: &str) -> Result<u32, nom::Err<VerboseError<&str>>> {
let (_, part) = number::double(part)?;
Ok(part as u32)
}
pub fn parse_u64(part: &str) -> Result<u64, nom::Err<VerboseError<&str>>> {
let (_, part) = number::double(part)?;
Ok(part as u64)
}
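// A minimal sanity-check sketch (not part of the original API surface): it
// assumes nom's `complete::double` parses a leading numeric literal, ignoring
// trailing input, and that the `as` casts truncate any fractional part.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_leading_numbers() {
        assert_eq!(parse_u8("42").unwrap(), 42);
        assert_eq!(parse_u16("65535").unwrap(), 65535);
        assert_eq!(parse_u32("7.9").unwrap(), 7); // fractional part is dropped by the cast
        assert_eq!(parse_u64("123abc").unwrap(), 123); // trailing input is ignored
    }
}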
| 28.409091 | 75 | 0.6176 |
9cbe5bd6aedd09c5b070c2b8dcf7894166fbe39b | 625 | use super::Semiring;
use crate::num::primitive::Int as PrimInt;
#[allow(unused_imports)]
use crate::utils::for_each_tuple; // for cargo-simple-bundler
/// A commutative semiring.
///
/// # Laws
/// * Commutativity of multiplication: ∀`x` ∀`y` (`x.mul(&y)` = `y.mul(&x)`)
pub trait CommutativeSemiring: Semiring {}
impl CommutativeSemiring for () {}
impl<Int> CommutativeSemiring for Int where Int: PrimInt {}
macro_rules! impl_for_tuple {
($($i:tt: $T:ident,)*) => {
impl<$($T),*> CommutativeSemiring for ($($T,)*)
where
$($T: CommutativeSemiring,)*
{
}
};
}
for_each_tuple! { impl_for_tuple }
| 23.148148 | 76 | 0.6448 |
e5fbb23d9871456abcae919690cc08bddca7802b | 1,140 | use crate::models::Window;
use crate::models::Workspace;
// use crate::models::WindowState;
/// Layout which gives only one window the full desktop real estate. A monocle mode.
pub fn update(workspace: &Workspace, windows: &mut Vec<&mut Window>) {
let window_count = windows.len();
if window_count == 0 {
return;
}
let workspace_width = workspace.width_limited(1);
let workspace_x = workspace.x_limited(1);
let mut iter = windows.iter_mut();
    // maximize primary window
{
if let Some(monowin) = iter.next() {
monowin.set_height(workspace.height());
monowin.set_width(workspace_width);
monowin.set_x(workspace_x);
monowin.set_y(workspace.y());
monowin.set_visible(true);
}
}
    // hide all other windows
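    // The hidden windows still get full-workspace geometry (presumably so they are
    // already sized correctly if the layout later brings one of them to the front).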
{
if window_count > 1 {
for w in iter {
w.set_height(workspace.height());
w.set_width(workspace_width);
w.set_x(workspace_x);
w.set_y(workspace.y());
w.set_visible(false);
}
}
}
}
| 26.511628 | 88 | 0.572807 |
e636794449f3f45031fd82e6d53fd6df5541fab3 | 926 | #[repr(C)]
pub struct U16Array {
pub data: *mut u16,
pub data_len: usize,
}
impl Into<Vec<u16>> for U16Array {
    fn into(self) -> Vec<u16> {
        // Wrap `self` in ManuallyDrop so the `Drop` impl below does not run when
        // `into` returns; without this, the buffer reclaimed here would be freed
        // a second time by `Drop`, causing a double free.
        let array = std::mem::ManuallyDrop::new(self);
        unsafe {
            Box::from_raw(std::slice::from_raw_parts_mut(array.data, array.data_len)).into_vec()
        }
    }
}
impl From<Vec<u16>> for U16Array {
fn from(vec: Vec<u16>) -> Self {
let mut box_slice = vec.into_boxed_slice();
let array = Self {
data: box_slice.as_mut_ptr(),
data_len: box_slice.len(),
};
let _ = Box::into_raw(box_slice);
array
}
}
impl Drop for U16Array {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(std::slice::from_raw_parts_mut(self.data, self.data_len)) };
}
}
#[no_mangle]
pub extern "C" fn destroy_u16_array(array: *mut U16Array) {
unsafe {
if !array.is_null() {
let _ = Box::from_raw(array);
}
}
}
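// Round-trip sketch: `Vec<u16>` -> `U16Array` -> `Vec<u16>` should preserve the
// contents and, with the `ManuallyDrop` guard in `into` above, must not double-free.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn round_trip() {
        let array = U16Array::from(vec![1u16, 2, 3]);
        let back: Vec<u16> = array.into();
        assert_eq!(back, vec![1, 2, 3]);
    }
}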
| 22.585366 | 99 | 0.559395 |
91e6e9c2a145e434190e8f779e46358804ac755a | 1,498 | //! Solutions for day 5
/// Return the number of jumps it takes to exit the instruction set encoded in the string
///
/// The `offset_rule` defines how the instruction that was just ran should be mutated for the next
/// executation, given the value of that instruction.
///
/// # Examples
///
/// The `offset_rule` increments the just-run instruction by 1.
///
/// ```
/// use aoc17::day5::jumps_until_end;
///
/// assert_eq!(jumps_until_end("0
/// 3
/// 0
/// 1
/// -3", |jump| 1), 5);
/// ```
///
/// The `offset_rule` increments the just-run instruction by 1 if its value was less than 3, else
/// it decrements it by 1.
///
/// ```
/// use aoc17::day5::jumps_until_end;
///
/// let offset_rule = |jump: isize| if jump < 3 { 1 } else { -1 };
/// assert_eq!(jumps_until_end("0
/// 3
/// 0
/// 1
/// -3", offset_rule), 10);
/// ```
pub fn jumps_until_end(s : &str, offset_rule : fn(isize) -> isize) -> usize {
let instructions : Vec<_> = s.lines().map(|x| x.parse::<isize>().unwrap()).collect();
let ninstructions = instructions.len();
let mut position : usize = 0;
let mut offsets : Vec<isize> = vec![0; ninstructions];
let mut nexecuted : usize = 0;
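// `instructions` itself is never mutated; each cell's accumulated adjustment
// lives in `offsets`, so the effective jump is always `instruction + offset`.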
while position < ninstructions {
let instruction = instructions[position];
let offset = offsets[position];
let jump = instruction + offset;
offsets[position] += offset_rule(jump);
position = ((position as isize) + jump) as usize;
nexecuted += 1;
}
nexecuted
}
| 28.264151 | 98 | 0.616822 |
79df7080e3fa439d4c1087aa73114c786201982b | 1,862 | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use identity::iota_core::Network;
use wasm_bindgen::prelude::*;
use crate::error::Result;
use crate::error::WasmResult;
#[wasm_bindgen(js_name = Network)]
pub struct WasmNetwork(pub(crate) Network);
#[wasm_bindgen(js_class = Network)]
impl WasmNetwork {
/// Parses the provided string to a `Network`.
///
/// Errors if the name is invalid.
#[wasm_bindgen(js_name = tryFromName)]
pub fn try_from_name(name: String) -> Result<WasmNetwork> {
Network::try_from_name(name).map(Self).wasm_result()
}
#[wasm_bindgen]
pub fn mainnet() -> WasmNetwork {
Self(Network::Mainnet)
}
#[wasm_bindgen]
pub fn devnet() -> WasmNetwork {
Self(Network::Devnet)
}
#[wasm_bindgen(getter)]
pub fn name(&self) -> String {
self.0.name_str().to_owned()
}
/// Returns the node URL of the Tangle network.
#[wasm_bindgen(getter = defaultNodeURL)]
pub fn default_node_url(&self) -> Option<String> {
self.0.default_node_url().map(ToString::to_string)
}
#[allow(clippy::inherent_to_string, clippy::wrong_self_convention)]
#[wasm_bindgen(js_name = toString)]
pub fn to_string(&self) -> String {
self.0.name_str().to_owned()
}
/// Serializes a `Network` as a JSON object.
#[wasm_bindgen(js_name = toJSON)]
pub fn to_json(&self) -> Result<JsValue> {
JsValue::from_serde(&self.0).wasm_result()
}
/// Deserializes a `Network` from a JSON object.
#[wasm_bindgen(js_name = fromJSON)]
pub fn from_json(json: &JsValue) -> Result<WasmNetwork> {
json.into_serde().map(Self).wasm_result()
}
}
impl_wasm_clone!(WasmNetwork, Network);
impl From<WasmNetwork> for Network {
fn from(other: WasmNetwork) -> Self {
other.0
}
}
impl From<Network> for WasmNetwork {
fn from(other: Network) -> Self {
Self(other)
}
}
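// JS usage sketch (illustrative):
//
//   const network = Network.mainnet();
//   console.log(network.name, network.defaultNodeURL);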
| 24.5 | 69 | 0.681525 |
f5d4dac191b88dc68a90f62fcc841262987a9a9b | 3,237 | use crate::auth::user::User as AuthUser;
use crate::errors::*;
use actix_web::dev::{self, Body, BodySize, MessageBody};
use actix_web::{error, http, http::StatusCode, HttpResponse, Responder};
use bytes::{BufMut, Bytes, BytesMut};
use futures::stream::StreamExt;
use serde_json::{self, Value};
use std::collections::HashMap;
use std::str;
const DEFAULT_BUF_SIZE: usize = 64 * 1024;
pub fn unauthorized<T: Responder>(
user: Option<AuthUser>,
additional_data: Option<HashMap<&'static str, Value>>,
) -> Result<T, ApiError> {
unauthorized_with_message("User does not have the required permissions", user, additional_data)
}
pub fn unauthorized_with_message<T: Responder>(
message: &str,
auth_user: Option<AuthUser>,
additional_data: Option<HashMap<&'static str, Value>>,
) -> Result<T, ApiError> {
if let Some(auth_user) = auth_user {
auth_user.log_unauthorized_access_attempt(additional_data.unwrap_or_default());
}
Err(AuthError::new(AuthErrorType::Unauthorized, message.into()).into())
}
pub fn forbidden<T: Responder>(message: &str) -> Result<T, ApiError> {
Err(AuthError::new(AuthErrorType::Forbidden, message.into()).into())
}
pub fn unprocessable<T: Responder>(message: &str) -> Result<T, ApiError> {
Err(ApplicationError::new_with_type(ApplicationErrorType::Unprocessable, message.to_string()).into())
}
pub fn bad_request<T: Responder>(message: &str) -> Result<T, ApiError> {
Err(ApplicationError::new_with_type(ApplicationErrorType::BadRequest, message.to_string()).into())
}
pub fn internal_server_error<T: Responder>(message: &str) -> Result<T, ApiError> {
error!("Internal Server Error: {}", message);
Err(ApplicationError::new(message.to_string()).into())
}
pub fn no_content() -> Result<HttpResponse, ApiError> {
Ok(HttpResponse::new(StatusCode::NO_CONTENT))
}
pub fn not_found() -> Result<HttpResponse, ApiError> {
warn!("Not found");
Ok(HttpResponse::new(StatusCode::NOT_FOUND))
}
pub fn method_not_allowed() -> Result<HttpResponse, ApiError> {
warn!("Method not allowed");
Ok(HttpResponse::new(StatusCode::METHOD_NOT_ALLOWED))
}
pub fn created(json: serde_json::Value) -> Result<HttpResponse, ApiError> {
Ok(HttpResponse::Created().json(json))
}
pub fn redirect(url: &str) -> Result<HttpResponse, ApiError> {
Ok(HttpResponse::Found().header(http::header::LOCATION, url).finish())
}
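// Usage sketch inside an actix-web handler (the handler and `is_admin` check
// are hypothetical; the helpers above keep error responses consistent):
//
//   pub async fn show(user: AuthUser) -> Result<HttpResponse, ApiError> {
//       if !user.is_admin() {
//           return unauthorized(Some(user), None);
//       }
//       created(serde_json::json!({ "status": "ok" }))
//   }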
pub fn unwrap_body_to_string(response: &HttpResponse) -> Result<&str, &'static str> {
match response.body() {
dev::ResponseBody::Body(Body::Bytes(binary)) | dev::ResponseBody::Other(Body::Bytes(binary)) => {
match str::from_utf8(binary.as_ref()) {
Ok(value) => Ok(value),
Err(_) => Err("Unable to unwrap body"),
}
}
_ => Err("Unexpected response body"),
}
}
pub async fn extract_response_bytes<B: MessageBody>(body: &mut dev::ResponseBody<B>) -> Result<Bytes, error::Error> {
let size_hint = match body.size() {
BodySize::Sized(n) => n,
_ => DEFAULT_BUF_SIZE,
};
let mut buf = BytesMut::with_capacity(size_hint);
while let Some(item) = body.next().await {
buf.put(item?);
}
Ok(buf.freeze())
}
| 35.184783 | 117 | 0.678715 |
727082d2bd43e7c3ea3a8a716697cc88da7503ef | 18,056 | use log::*;
use winapi::shared::dxgi::*;
use winapi::shared::dxgiformat::*;
use winapi::shared::dxgitype::*;
use winapi::shared::minwindef::*;
use winapi::shared::windef::{HBRUSH, HICON, HMENU, HWND, POINT, RECT};
use winapi::um::d3d11::*;
use winapi::um::d3dcommon::*;
use winapi::um::libloaderapi::GetProcAddress;
use winapi::um::libloaderapi::LoadLibraryA;
use winapi::um::winnt::*;
use winapi::um::winuser::*;
use winapi::Interface;
use winapi::um::xinput::XINPUT_STATE;
use winapi::um::xinput::XInputGetState;
use core::mem::MaybeUninit;
use std::cell::Cell;
use std::ffi::c_void;
use std::ffi::CString;
use std::ptr::null_mut;
use crate::imgui_impl;
use crate::mh;
use crate::util::Error;
type Result<T> = std::result::Result<T, Error>;
type XInputGetStateType =
unsafe extern "system" fn(dw_user_index: DWORD, p_state: *mut XINPUT_STATE) -> DWORD;
type DXGISwapChainPresentType =
unsafe extern "system" fn(This: *mut IDXGISwapChain, SyncInterval: UINT, Flags: UINT) -> HRESULT;
// type IDXGISwapChainPresent =
// unsafe extern "system" fn(This: *mut IDXGISwapChain, SyncInterval: UINT, Flags: UINT) -> HRESULT;
type WndProc =
unsafe extern "system" fn(hwnd: HWND, umsg: UINT, wparam: WPARAM, lparam: LPARAM) -> isize;
/// Data structure to hold all info we need at frame render time.
pub(crate) struct DxgiHook {
present_trampoline: DXGISwapChainPresentType,
default_wnd_proc: WndProc,
p_device_context: *mut ID3D11DeviceContext,
render_target_view: *mut ID3D11RenderTargetView,
imgui_ctx: imgui::Context,
renderer: imgui_impl::dx11::Renderer,
render_loop: Box<dyn RenderLoop>,
}
/// State machine for the initialization status of the DXGI hook.
enum DxgiHookState {
Uninitialized,
Hooked(
DXGISwapChainPresentType,
XInputGetStateType,
Box<dyn RenderLoop>,
),
Errored(DXGISwapChainPresentType),
Ok(Box<DxgiHook>),
}
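// Observed transitions in the `Present` hook below: `Hooked` becomes `Ok` on
// successful DX11 setup or `Errored` on failure; `Ok` and `Errored` are
// terminal. Code outside this file (the `hook!` entry point) is expected to
// move the state from `Uninitialized` to `Hooked` before the first frame.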
impl Default for DxgiHookState {
fn default() -> DxgiHookState {
DxgiHookState::Uninitialized
}
}
// why does it have to be static FeelsBadMan
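// (the extern "system" detour and WndProc callbacks receive no user-data
// pointer, so shared state has to be reachable from a static)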
static mut DXGI_HOOK_STATE: Cell<DxgiHookState> = Cell::new(DxgiHookState::Uninitialized);
fn cast_dptr<T>(t: &mut *mut T) -> *mut *mut c_void {
t as *mut *mut T as *mut *mut c_void
}
impl DxgiHook {
/// Initialize the DXGI hook.
// TODO URGENT if Result is Err, caller must call present_trampoline
unsafe fn initialize_dx(
present_trampoline: DXGISwapChainPresentType,
p_this: *mut IDXGISwapChain,
render_loop: Box<dyn RenderLoop>,
) -> Result<DxgiHook> {
trace!("Initializing DXGI hook");
let this = &*p_this;
let mut p_device: *mut ID3D11Device = null_mut();
let mut p_device_context: *mut ID3D11DeviceContext = null_mut();
let mut sd: DXGI_SWAP_CHAIN_DESC = std::mem::zeroed();
let mut back_buf: *mut ID3D11Texture2D = null_mut();
let mut render_target_view: *mut ID3D11RenderTargetView = null_mut();
let mut lpc: UINT = 0;
this.GetLastPresentCount(&mut lpc);
let result = this.GetDevice(&ID3D11Device::uuidof(), cast_dptr(&mut p_device));
if result < 0 {
return Err(Error(format!(
"Get device + ctx from swap chain failed: {:?} {:?}",
result, p_this
)));
};
(*p_device).GetImmediateContext(&mut p_device_context);
this.GetDesc(&mut sd as _);
#[allow(clippy::fn_to_numeric_cast)]
let default_wnd_proc = std::mem::transmute(SetWindowLongPtrA(
sd.OutputWindow,
GWLP_WNDPROC,
wnd_proc as _,
));
let mut imgui_ctx = imgui::Context::create();
imgui_ctx.set_ini_filename(None);
imgui_ctx
.fonts()
.add_font(&[imgui::FontSource::DefaultFontData {
config: Some(imgui::FontConfig {
..imgui::FontConfig::default()
}),
}]);
imgui_ctx
.fonts()
.add_font(&[imgui::FontSource::DefaultFontData {
config: Some(imgui::FontConfig {
size_pixels: 26.,
..imgui::FontConfig::default()
}),
}]);
let renderer = imgui_impl::dx11::Renderer::new(p_device, p_device_context, &mut imgui_ctx)?;
this.GetBuffer(
0,
&ID3D11Texture2D::uuidof(),
&mut back_buf as *mut *mut _ as _,
);
(*p_device).CreateRenderTargetView(
back_buf as _,
null_mut(),
&mut render_target_view as *mut *mut _ as _,
);
(*back_buf).Release();
trace!("Initialization completed");
Ok(DxgiHook {
present_trampoline,
default_wnd_proc,
p_device_context,
render_target_view,
imgui_ctx,
renderer,
render_loop,
})
}
/// Render loop function.
///
/// This function is called in place of the regular `IDXGISwapChain::Present`
/// function and is responsible for finally calling the trampoline and
/// letting the game run its own code.
unsafe fn render(
&mut self,
p_this: *mut IDXGISwapChain,
sync_interval: UINT,
flags: UINT,
) -> HRESULT {
let this = &*p_this;
let mut sd: DXGI_SWAP_CHAIN_DESC = std::mem::zeroed();
let mut rect: RECT = std::mem::zeroed();
// SAFETY
// idk lmao
(*self.p_device_context).OMSetRenderTargets(
1,
&mut self.render_target_view as *mut *mut _,
null_mut(),
);
// SAFETY
// No reason this.as_ref() should error at this point, and probably it's a
// good idea to crash and burn if it does. TODO check
this.GetDesc(&mut sd as _);
if GetWindowRect(sd.OutputWindow, &mut rect as _) != 0 {
let mut io = self.imgui_ctx.io_mut();
io.display_size = [
(rect.right - rect.left) as f32,
(rect.bottom - rect.top) as f32,
];
let io = self.imgui_ctx.io();
let keys_down = io
.keys_down
.iter()
.enumerate()
.filter_map(|(idx, &val)| if val { Some(idx) } else { None })
.collect::<Vec<_>>();
let imgui::Io {
key_ctrl,
key_shift,
key_alt,
key_super,
display_size,
..
} = *io;
trace!("Calling render loop");
set_mouse_pos(&mut self.imgui_ctx, sd.OutputWindow);
let ui = self.imgui_ctx.frame();
self.render_loop.render(RenderContext {
frame: &ui,
key_ctrl,
key_shift,
key_alt,
key_super,
keys_down,
display_size,
});
trace!("Rendering frame data");
let dd = ui.render();
if self.render_loop.is_visible() {
trace!("Displaying image data");
match self.renderer.render(dd) {
Ok(_) => {}
Err(e) => error!("Renderer errored: {:?}", e),
};
}
}
(self.present_trampoline)(p_this, sync_interval, flags)
}
}
fn set_mouse_pos(ctx: &mut imgui::Context, hwnd: HWND) {
let io = ctx.io_mut();
let mut pos = POINT { x: 0, y: 0 };
let active_window = unsafe { GetForegroundWindow() };
if active_window != 0 as HWND
&& (active_window == hwnd || unsafe { IsChild(active_window, hwnd) != 0 })
{
let gcp = unsafe { GetCursorPos(&mut pos as *mut _) };
if gcp != 0 && unsafe { ScreenToClient(hwnd, &mut pos as *mut _) } != 0 {
io.mouse_pos[0] = pos.x as _;
io.mouse_pos[1] = pos.y as _;
}
}
}
/// Custom `WndProc` installed over the game window.
///
/// Processes keyboard and mouse events, feeds them to the `imgui` context,
/// and forwards unconsumed messages to the original window procedure.
unsafe extern "system" fn wnd_proc(
hwnd: HWND,
umsg: UINT,
wparam: WPARAM,
lparam: LPARAM,
) -> isize {
if let DxgiHookState::Ok(hook) = DXGI_HOOK_STATE.get_mut() {
let set_capture = |mouse_down: &[bool], hwnd| {
let any_down = mouse_down.iter().any(|i| *i);
if !any_down && GetCapture() == 0 as HWND {
SetCapture(hwnd);
}
};
let release_capture = |mouse_down: &[bool], hwnd| {
let any_down = mouse_down.iter().any(|i| *i);
if !any_down && GetCapture() == hwnd {
ReleaseCapture();
}
};
match umsg {
WM_KEYDOWN | WM_SYSKEYDOWN => {
if wparam < 256 {
hook.imgui_ctx.io_mut().keys_down[wparam] = true;
}
}
WM_KEYUP | WM_SYSKEYUP => {
if wparam < 256 {
hook.imgui_ctx.io_mut().keys_down[wparam] = false;
}
}
WM_LBUTTONDOWN | WM_LBUTTONDBLCLK => {
set_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
hook.imgui_ctx.io_mut().mouse_down[0] = true;
return 1;
}
WM_RBUTTONDOWN | WM_RBUTTONDBLCLK => {
set_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
hook.imgui_ctx.io_mut().mouse_down[1] = true;
return 1;
}
WM_MBUTTONDOWN | WM_MBUTTONDBLCLK => {
set_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
hook.imgui_ctx.io_mut().mouse_down[2] = true;
return 1;
}
WM_XBUTTONDOWN | WM_XBUTTONDBLCLK => {
let btn = if GET_XBUTTON_WPARAM(wparam) == XBUTTON1 {
3
} else {
4
};
set_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
hook.imgui_ctx.io_mut().mouse_down[btn] = true;
return 1;
}
WM_LBUTTONUP => {
hook.imgui_ctx.io_mut().mouse_down[0] = false;
release_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
return 1;
}
WM_RBUTTONUP => {
hook.imgui_ctx.io_mut().mouse_down[1] = false;
release_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
return 1;
}
WM_MBUTTONUP => {
hook.imgui_ctx.io_mut().mouse_down[2] = false;
release_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
return 1;
}
WM_XBUTTONUP => {
let btn = if GET_XBUTTON_WPARAM(wparam) == XBUTTON1 {
3
} else {
4
};
hook.imgui_ctx.io_mut().mouse_down[btn] = false;
release_capture(&hook.imgui_ctx.io().mouse_down, hwnd);
}
WM_MOUSEWHEEL => {
hook.imgui_ctx.io_mut().mouse_wheel +=
(GET_WHEEL_DELTA_WPARAM(wparam) as f32) / (WHEEL_DELTA as f32);
}
WM_MOUSEHWHEEL => {
hook.imgui_ctx.io_mut().mouse_wheel_h +=
(GET_WHEEL_DELTA_WPARAM(wparam) as f32) / (WHEEL_DELTA as f32);
}
WM_CHAR => hook
.imgui_ctx
.io_mut()
.add_input_character(wparam as u8 as char),
_ => {}
}
CallWindowProcW(Some(hook.default_wnd_proc), hwnd, umsg, wparam, lparam)
} else {
0
}
}
#[allow(non_snake_case)]
extern "system" fn XInputGetStateOverride(
dw_user_index: DWORD,
p_state: *mut XINPUT_STATE,
) -> DWORD {
let mut state: XINPUT_STATE = unsafe { std::mem::zeroed() };
let retval = unsafe { XInputGetState(dw_user_index, &mut state as *mut _) };
if let Some(m) = unsafe { p_state.as_mut() } {
*m = state;
}
retval
}
/// Implementation of the hooked `Present` function.
///
/// Implements a state machine to move the hook from uninitialized, to
/// hooked, to rendering or errored.
#[allow(non_snake_case)]
unsafe extern "system" fn DXGISwapChainPresentOverride(
this: *mut IDXGISwapChain,
sync_interval: UINT,
flags: UINT,
) -> HRESULT {
// State transition the dxgi hook struct
DXGI_HOOK_STATE.replace(match DXGI_HOOK_STATE.take() {
DxgiHookState::Uninitialized => {
unreachable!("DXGI Hook State uninitialized in present_impl -- this should never happen!")
}
DxgiHookState::Hooked(present_trampoline, xigs_trampoline, render_loop) => {
match DxgiHook::initialize_dx(present_trampoline, this, render_loop) {
Ok(dh) => DxgiHookState::Ok(Box::new(dh)),
Err(e) => {
error!("DXGI Hook initialization failed: {:?}", e);
DxgiHookState::Errored(present_trampoline)
}
}
}
DxgiHookState::Errored(present_trampoline) => DxgiHookState::Errored(present_trampoline),
DxgiHookState::Ok(dh) => DxgiHookState::Ok(dh),
});
match DXGI_HOOK_STATE.get_mut() {
DxgiHookState::Uninitialized => unreachable!(),
DxgiHookState::Hooked(_, _, _) => unreachable!(),
DxgiHookState::Errored(present_trampoline) => present_trampoline(this, sync_interval, flags),
DxgiHookState::Ok(dxgi_hook) => dxgi_hook.render(this, sync_interval, flags),
}
}
/// Get the `IDXGISwapChain::Present` function address.
///
/// Creates a swap chain + device instance and looks up its
/// vtable to find the address.
fn get_present_addr() -> Result<LPVOID> {
let hwnd = {
let hinstance = unsafe { winapi::um::libloaderapi::GetModuleHandleA(std::ptr::null::<i8>()) };
let wnd_class = WNDCLASSA {
style: CS_OWNDC | CS_HREDRAW | CS_VREDRAW,
lpfnWndProc: Some(DefWindowProcA),
hInstance: hinstance,
lpszClassName: "HUDHOOK_DUMMY\0".as_ptr() as *const i8,
cbClsExtra: 0,
cbWndExtra: 0,
hIcon: 0 as HICON,
hCursor: 0 as HICON,
hbrBackground: 0 as HBRUSH,
lpszMenuName: std::ptr::null::<i8>(),
};
unsafe {
RegisterClassA(&wnd_class);
CreateWindowExA(
0,
"HUDHOOK_DUMMY\0".as_ptr() as _,
"HUDHOOK_DUMMY\0".as_ptr() as _,
WS_OVERLAPPEDWINDOW | WS_VISIBLE,
0,
0,
16,
16,
0 as HWND,
0 as HMENU,
std::mem::transmute(hinstance),
0 as LPVOID,
)
}
};
let mut feature_level = D3D_FEATURE_LEVEL_11_0;
let mut swap_chain_desc: DXGI_SWAP_CHAIN_DESC = unsafe { std::mem::zeroed() };
let mut p_device: *mut ID3D11Device = null_mut();
let mut p_context: *mut ID3D11DeviceContext = null_mut();
let mut p_swap_chain: *mut IDXGISwapChain = null_mut();
swap_chain_desc.BufferCount = 1;
swap_chain_desc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swap_chain_desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
swap_chain_desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swap_chain_desc.OutputWindow = hwnd;
swap_chain_desc.SampleDesc.Count = 1;
swap_chain_desc.Windowed = 1;
let result = unsafe {
D3D11CreateDeviceAndSwapChain(
std::ptr::null_mut::<IDXGIAdapter>(),
D3D_DRIVER_TYPE_HARDWARE,
0 as HMODULE,
0u32,
&mut feature_level as *mut D3D_FEATURE_LEVEL,
1,
D3D11_SDK_VERSION,
&mut swap_chain_desc as *mut DXGI_SWAP_CHAIN_DESC,
&mut p_swap_chain as *mut *mut IDXGISwapChain,
&mut p_device as *mut *mut ID3D11Device,
null_mut(),
&mut p_context as *mut *mut ID3D11DeviceContext,
)
};
if result < 0 {
return Err(Error(format!(
"D3D11CreateDeviceAndSwapChain failed {:x}",
result
)));
}
let ret = unsafe { (*(*p_swap_chain).lpVtbl).Present };
unsafe {
(*p_device).Release();
(*p_context).Release();
(*p_swap_chain).Release();
DestroyWindow(hwnd);
}
Ok(ret as LPVOID)
}
unsafe fn get_xinput_addr() -> LPVOID {
let xinput_dll = LoadLibraryA(CString::new("xinput1_3.dll").unwrap().as_c_str().as_ptr());
let xinput_addr = GetProcAddress(
xinput_dll,
CString::new("XInputGetState").unwrap().as_c_str().as_ptr(),
);
xinput_addr as _
}
// ==================
// === PUBLIC API ===
// ==================
/// Interface for implementing the render loop.
///
pub trait RenderLoop {
/// Invoked once per frame. Memory management and UI visualization (via the
/// current frame's `imgui::Ui` instance) should be made inside of it.
fn render(&mut self, ctx: RenderContext);
/// Return `true` when you want your UI to be rendered to screen.
///
/// The [`render`](#tyrender) method will still be called, but the draw data
/// will not be displayed.
fn is_visible(&self) -> bool;
/// Return `true` when you want the underlying application to stop receiving
/// `WndProc` events. Presently not functioning.
fn is_capturing(&self) -> bool;
}
/// Information context made available to the RenderLoop
///
/// For now, it is a subset of the `imgui` context crafted in such a way that
/// it is difficult to break the (limited) intended way of operating the library.
pub struct RenderContext<'a> {
pub frame: &'a imgui::Ui<'a>,
pub key_ctrl: bool,
pub key_shift: bool,
pub key_alt: bool,
pub key_super: bool,
pub keys_down: Vec<usize>,
pub display_size: [f32; 2],
}
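// A minimal `RenderLoop` implementation sketch (the drawing call is
// illustrative; exact `imgui` widget APIs vary by crate version):
//
//   struct HelloLoop;
//
//   impl RenderLoop for HelloLoop {
//       fn render(&mut self, ctx: RenderContext) {
//           ctx.frame.text("hello from the hook");
//       }
//       fn is_visible(&self) -> bool { true }
//       fn is_capturing(&self) -> bool { false }
//   }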
/// Inner entry point for the library.
///
/// Should not be invoked directly, but via the `hook!` macro which will
/// also provide the `WinMain` entry point.
///
/// This function finds the `IDXGISwapChain::Present` function address,
/// creates and enables the hook via `MinHook`. Returns the callback to the
/// trampoline function, if successful.
pub unsafe fn apply_hook(
render_loop: Box<dyn RenderLoop>,
) -> Result<(DXGISwapChainPresentType, XInputGetStateType)> {
let xinput_addr = get_xinput_addr();
info!("XInputGetState = {:p}", xinput_addr);
let dxgi_swap_chain_present_addr = get_present_addr()?;
info!(
"IDXGISwapChain::Present = {:p}",
dxgi_swap_chain_present_addr
);
let mut xinput_get_state_trampoline = MaybeUninit::<XInputGetStateType>::uninit();
let mut dxgi_swap_chain_present_trampoline = MaybeUninit::<DXGISwapChainPresentType>::uninit();
let status = mh::MH_Initialize();
info!("MH_Initialize: {:?}", status);
// Hook IDXGISwapChain::Present
let status = mh::MH_CreateHook(
dxgi_swap_chain_present_addr,
DXGISwapChainPresentOverride as LPVOID,
&mut dxgi_swap_chain_present_trampoline as *mut _ as _,
);
info!("MH_CreateHook: {:?}", status);
let status = mh::MH_QueueEnableHook(dxgi_swap_chain_present_addr);
info!("MH_QueueEnableHook: {:?}", status);
// Hook XInputGetState
let status = mh::MH_CreateHook(
xinput_addr,
XInputGetStateOverride as LPVOID,
&mut xinput_get_state_trampoline as *mut _ as _,
);
info!("MH_CreateHook: {:?}", status);
let status = mh::MH_QueueEnableHook(xinput_addr);
info!("MH_QueueEnableHook: {:?}", status);
let status = mh::MH_ApplyQueued();
info!("MH_ApplyQueued: {:?}", status);
Ok((
dxgi_swap_chain_present_trampoline.assume_init(),
xinput_get_state_trampoline.assume_init(),
))
}
| 29.746293 | 102 | 0.643996 |
8f0ce5b32da7eaddb44d76405d9358c88c380641 | 8,485 | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::biguint::biguint_from_vec;
use num_integer::Integer;
use num_traits::{ToPrimitive, Zero};
/// A trait for sampling random big integers.
///
/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: u64) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: u64) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
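// Usage sketch (with the `rand` feature enabled; `low`/`high` are placeholders):
//
//   use rand::thread_rng;
//   let mut rng = thread_rng();
//   let a = rng.gen_biguint(256);               // uniformly random below 2^256
//   let b = rng.gen_biguint_range(&low, &high); // uniform in [low, high)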
fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [u32], rem: u64) {
// `fill` is faster than many `gen::<u32>` calls
rng.fill(data);
if rem > 0 {
let last = data.len() - 1;
data[last] >>= 32 - rem;
}
}
impl<R: Rng + ?Sized> RandBigInt for R {
#[cfg(not(u64_digit))]
fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
let (digits, rem) = bit_size.div_rem(&32);
let len = (digits + (rem > 0) as u64)
.to_usize()
.expect("capacity overflow");
let mut data = vec![0u32; len];
gen_bits(self, &mut data, rem);
biguint_from_vec(data)
}
#[cfg(u64_digit)]
fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
use core::slice;
let (digits, rem) = bit_size.div_rem(&32);
let len = (digits + (rem > 0) as u64)
.to_usize()
.expect("capacity overflow");
let native_digits = Integer::div_ceil(&bit_size, &64);
let native_len = native_digits.to_usize().expect("capacity overflow");
let mut data = vec![0u64; native_len];
unsafe {
// Generate bits in a `&mut [u32]` slice for value stability
let ptr = data.as_mut_ptr() as *mut u32;
debug_assert!(native_len * 2 >= len);
let data = slice::from_raw_parts_mut(ptr, len);
gen_bits(self, data, rem);
}
#[cfg(target_endian = "big")]
for digit in &mut data {
// swap u32 digits into u64 endianness
*digit = (*digit << 32) | (*digit >> 32);
}
biguint_from_vec(data)
}
fn gen_bigint(&mut self, bit_size: u64) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(ubound.magnitude()))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(lbound.magnitude()))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(delta.magnitude()))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
rng.gen_biguint_range(low.borrow(), high.borrow())
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: (high - low).into_parts().1,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
rng.gen_bigint_range(low.borrow(), high.borrow())
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
///
/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: u64,
}
impl RandomBits {
#[inline]
pub fn new(bits: u64) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
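// `RandomBits` plugs into rand's `Distribution` machinery; sketch:
//
//   use rand::{distributions::Distribution, thread_rng};
//   let x: BigUint = RandomBits::new(64).sample(&mut thread_rng());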
| 29.876761 | 94 | 0.567001 |
1da60d3526a4bc98e1d2941e41eaae31766d8023 | 2,170 | //! Data which is used in pointer callbacks.
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use sctk::reexports::client::protocol::wl_surface::WlSurface;
use sctk::reexports::client::Attached;
use sctk::reexports::protocols::unstable::pointer_constraints::v1::client::zwp_pointer_constraints_v1::{ZwpPointerConstraintsV1};
use sctk::reexports::protocols::unstable::pointer_constraints::v1::client::zwp_confined_pointer_v1::ZwpConfinedPointerV1;
use crate::event::{ModifiersState, TouchPhase};
/// A data being used by pointer handlers.
pub(super) struct PointerData {
/// Winit's surface the pointer is currently over.
pub surface: Option<WlSurface>,
/// Current modifiers state.
///
/// This refers to the state of modifiers from `WlKeyboard` on
/// the given seat.
pub modifiers_state: Rc<RefCell<ModifiersState>>,
/// Pointer constraints.
pub pointer_constraints: Option<Attached<ZwpPointerConstraintsV1>>,
pub confined_pointer: Rc<RefCell<Option<ZwpConfinedPointerV1>>>,
/// A latest event serial.
pub latest_serial: Rc<Cell<u32>>,
/// The currently accumulated axis data on a pointer.
pub axis_data: AxisData,
}
impl PointerData {
pub fn new(
confined_pointer: Rc<RefCell<Option<ZwpConfinedPointerV1>>>,
pointer_constraints: Option<Attached<ZwpPointerConstraintsV1>>,
modifiers_state: Rc<RefCell<ModifiersState>>,
) -> Self {
Self {
surface: None,
latest_serial: Rc::new(Cell::new(0)),
confined_pointer,
modifiers_state,
pointer_constraints,
axis_data: AxisData::new(),
}
}
}
/// Axis data.
#[derive(Clone, Copy)]
pub(super) struct AxisData {
/// Current state of the axis.
pub axis_state: TouchPhase,
/// A buffer for `PixelDelta` event.
pub axis_buffer: Option<(f32, f32)>,
/// A buffer for `LineDelta` event.
pub axis_discrete_buffer: Option<(f32, f32)>,
}
impl AxisData {
pub fn new() -> Self {
Self {
axis_state: TouchPhase::Ended,
axis_buffer: None,
axis_discrete_buffer: None,
}
}
}
| 28.933333 | 129 | 0.662212 |
18d8b23d8e3c14ec72f0ad8ad34013ed515702dd | 1,109 | use crate::ffi::object::PyObject;
use std::os::raw::{c_char, c_double, c_int};
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyOS_string_to_double")]
pub fn PyOS_string_to_double(
str: *const c_char,
endptr: *mut *mut c_char,
overflow_exception: *mut PyObject,
) -> c_double;
#[cfg_attr(PyPy, link_name = "PyPyOS_double_to_string")]
pub fn PyOS_double_to_string(
val: c_double,
format_code: c_char,
precision: c_int,
flags: c_int,
_type: *mut c_int,
) -> *mut c_char;
}
/* PyOS_double_to_string's "flags" parameter can be set to 0 or more of: */
pub const Py_DTSF_SIGN: c_int = 0x01; /* always add the sign */
pub const Py_DTSF_ADD_DOT_0: c_int = 0x02; /* if the result is an integer add ".0" */
pub const Py_DTSF_ALT: c_int = 0x04; /* "alternate" formatting. it's format_code specific */
/* PyOS_double_to_string's "type", if non-NULL, will be set to one of: */
pub const Py_DTST_FINITE: c_int = 0;
pub const Py_DTST_INFINITE: c_int = 1;
pub const Py_DTST_NAN: c_int = 2;
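// Usage sketch (unsafe FFI; per CPython's documentation the caller frees the
// returned buffer with `PyMem_Free`):
//
//   let mut kind: c_int = 0;
//   let repr = unsafe {
//       PyOS_double_to_string(1.5, b'r' as c_char, 0, Py_DTSF_ADD_DOT_0, &mut kind)
//   };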
| 35.774194 | 92 | 0.666366 |
e881a32f78fd74ee22f3b6ad88834576cd5dc331 | 2,470 | #![recursion_limit = "256"]
use linkerd2_error::Error;
use std::{future::Future, pin::Pin, task::Poll, time::Duration};
use tokio::sync::{mpsc, oneshot, watch};
mod dispatch;
pub mod error;
mod layer;
mod service;
pub use self::{layer::SpawnBufferLayer, service::Buffer};
struct InFlight<Req, Rsp> {
request: Req,
tx: oneshot::Sender<
Result<Pin<Box<dyn Future<Output = Result<Rsp, Error>> + Send + 'static>>, Error>,
>,
}
pub(crate) fn new<Req, S>(
inner: S,
capacity: usize,
idle_timeout: Option<Duration>,
) -> (
Buffer<Req, S::Response>,
impl std::future::Future<Output = ()> + Send + 'static,
)
where
Req: Send + 'static,
S: tower::Service<Req> + Send + 'static,
S::Error: Into<Error> + Send + 'static,
S::Response: Send + 'static,
S::Future: Send + 'static,
{
use futures::future;
let (tx, rx) = mpsc::channel(capacity);
let (ready_tx, ready_rx) = watch::channel(Poll::Pending);
let idle = move || match idle_timeout {
Some(t) => future::Either::Left(dispatch::idle(t)),
None => future::Either::Right(future::pending()),
};
let dispatch = dispatch::run(inner, rx, ready_tx, idle);
(Buffer::new(tx, ready_rx), dispatch)
}
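// Usage sketch (hypothetical inner service; the returned `dispatch` future
// must be spawned for the buffer to make progress):
//
//   let (buffer, dispatch) = new(inner, 100, Some(Duration::from_secs(60)));
//   tokio::spawn(dispatch);
//   // `buffer` now forwards requests to `inner` over the channel.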
#[cfg(test)]
mod test {
use std::task::Poll;
use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
use tower_test::mock;
#[test]
fn propagates_readiness() {
let (service, mut handle) = mock::pair::<(), ()>();
let (service, dispatch) = super::new(service, 1, None);
handle.allow(0);
let mut service = mock::Spawn::new(service);
let mut dispatch = task::spawn(dispatch);
assert_pending!(dispatch.poll());
assert_pending!(service.poll_ready());
handle.allow(1);
assert_pending!(dispatch.poll());
assert_ready_ok!(service.poll_ready());
// Consume the allowed call.
drop(service.call(()));
handle.send_error(Bad);
assert_pending!(dispatch.poll());
assert!(
matches!(service.poll_ready(), Poll::Ready(Err(e)) if e.source().unwrap().is::<Bad>())
);
drop(service);
assert_ready!(dispatch.poll());
}
#[derive(Debug)]
struct Bad;
impl std::error::Error for Bad {}
impl std::fmt::Display for Bad {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "bad")
}
}
}
| 27.142857 | 98 | 0.589474 |
8959d3b4be49c04e772fbb6334c1958c2eec10e0 | 8,116 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::InetAddress;
use crate::Initable;
use crate::SocketFamily;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use std::ptr;
glib::wrapper! {
#[doc(alias = "GInetAddressMask")]
pub struct InetAddressMask(Object<ffi::GInetAddressMask, ffi::GInetAddressMaskClass>) @implements Initable;
match fn {
type_ => || ffi::g_inet_address_mask_get_type(),
}
}
impl InetAddressMask {
#[doc(alias = "g_inet_address_mask_new")]
pub fn new<P: IsA<InetAddress>>(addr: &P, length: u32) -> Result<InetAddressMask, glib::Error> {
unsafe {
let mut error = ptr::null_mut();
let ret =
ffi::g_inet_address_mask_new(addr.as_ref().to_glib_none().0, length, &mut error);
if error.is_null() {
Ok(from_glib_full(ret))
} else {
Err(from_glib_full(error))
}
}
}
#[doc(alias = "g_inet_address_mask_new_from_string")]
#[doc(alias = "new_from_string")]
pub fn from_string(mask_string: &str) -> Result<InetAddressMask, glib::Error> {
unsafe {
let mut error = ptr::null_mut();
let ret =
ffi::g_inet_address_mask_new_from_string(mask_string.to_glib_none().0, &mut error);
if error.is_null() {
Ok(from_glib_full(ret))
} else {
Err(from_glib_full(error))
}
}
}
}
impl fmt::Display for InetAddressMask {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&InetAddressMaskExt::to_str(self))
}
}
unsafe impl Send for InetAddressMask {}
unsafe impl Sync for InetAddressMask {}
pub const NONE_INET_ADDRESS_MASK: Option<&InetAddressMask> = None;
pub trait InetAddressMaskExt: 'static {
#[doc(alias = "g_inet_address_mask_equal")]
fn equal<P: IsA<InetAddressMask>>(&self, mask2: &P) -> bool;
#[doc(alias = "g_inet_address_mask_get_address")]
#[doc(alias = "get_address")]
fn address(&self) -> InetAddress;
#[doc(alias = "g_inet_address_mask_get_family")]
#[doc(alias = "get_family")]
fn family(&self) -> SocketFamily;
#[doc(alias = "g_inet_address_mask_get_length")]
#[doc(alias = "get_length")]
fn length(&self) -> u32;
#[doc(alias = "g_inet_address_mask_matches")]
fn matches<P: IsA<InetAddress>>(&self, address: &P) -> bool;
#[doc(alias = "g_inet_address_mask_to_string")]
#[doc(alias = "to_string")]
fn to_str(&self) -> glib::GString;
fn set_address<P: IsA<InetAddress>>(&self, address: Option<&P>);
fn set_length(&self, length: u32);
#[doc(alias = "address")]
fn connect_address_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F)
-> SignalHandlerId;
#[doc(alias = "family")]
fn connect_family_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "length")]
fn connect_length_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<InetAddressMask>> InetAddressMaskExt for O {
fn equal<P: IsA<InetAddressMask>>(&self, mask2: &P) -> bool {
unsafe {
from_glib(ffi::g_inet_address_mask_equal(
self.as_ref().to_glib_none().0,
mask2.as_ref().to_glib_none().0,
))
}
}
fn address(&self) -> InetAddress {
unsafe {
from_glib_none(ffi::g_inet_address_mask_get_address(
self.as_ref().to_glib_none().0,
))
}
}
fn family(&self) -> SocketFamily {
unsafe {
from_glib(ffi::g_inet_address_mask_get_family(
self.as_ref().to_glib_none().0,
))
}
}
fn length(&self) -> u32 {
unsafe { ffi::g_inet_address_mask_get_length(self.as_ref().to_glib_none().0) }
}
fn matches<P: IsA<InetAddress>>(&self, address: &P) -> bool {
unsafe {
from_glib(ffi::g_inet_address_mask_matches(
self.as_ref().to_glib_none().0,
address.as_ref().to_glib_none().0,
))
}
}
fn to_str(&self) -> glib::GString {
unsafe {
from_glib_full(ffi::g_inet_address_mask_to_string(
self.as_ref().to_glib_none().0,
))
}
}
fn set_address<P: IsA<InetAddress>>(&self, address: Option<&P>) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"address\0".as_ptr() as *const _,
address.to_value().to_glib_none().0,
);
}
}
fn set_length(&self, length: u32) {
unsafe {
glib::gobject_ffi::g_object_set_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"length\0".as_ptr() as *const _,
length.to_value().to_glib_none().0,
);
}
}
fn connect_address_notify<F: Fn(&Self) + Send + Sync + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn notify_address_trampoline<
P: IsA<InetAddressMask>,
F: Fn(&P) + Send + Sync + 'static,
>(
this: *mut ffi::GInetAddressMask,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(InetAddressMask::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::address\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_address_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_family_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_family_trampoline<
P: IsA<InetAddressMask>,
F: Fn(&P) + Send + Sync + 'static,
>(
this: *mut ffi::GInetAddressMask,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(InetAddressMask::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::family\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_family_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_length_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_length_trampoline<
P: IsA<InetAddressMask>,
F: Fn(&P) + Send + Sync + 'static,
>(
this: *mut ffi::GInetAddressMask,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(InetAddressMask::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::length\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_length_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
| 31.952756 | 111 | 0.541893 |
d92be2a76b2de0a0ef005485fcda3787cf814158 | 1,996 | extern crate utils;
use std::env;
use std::io::{self, BufReader};
use std::io::prelude::*;
use std::fs::File;
use utils::*;
type Input = Vec<Vec<char>>;
fn in_memory_len(s: &Vec<char>) -> usize {
let s = &s[1..(s.len() - 1)];
let mut cnt = 0;
let mut i = 0;
while i < s.len() {
if s[i] == '\\' {
if i < s.len() - 1 {
if s[i + 1] == 'x' {
i += 2;
}
i += 1;
}
}
cnt += 1;
i += 1;
}
cnt
}
fn part1(input: &Input) -> usize {
let (code_len, memory_len) = input.iter()
.fold((0, 0), |(cl, ml), s| {
(cl + s.len(), ml + in_memory_len(s))
});
code_len - memory_len
}
fn encoded_len(s: &Vec<char>) -> usize {
s.iter().fold(0, |l, c| l + if *c == '\\' || *c == '"' { 2 } else { 1 }) + 2
}
fn part2(input: &Input) -> usize {
let (code_len, encoded_len) = input.iter()
.fold((0, 0), |(cl, el), s| {
(cl + s.len(), el + encoded_len(s))
});
encoded_len - code_len
}
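// Worked example (from the puzzle statement): the literal "aaa\"aaa" is
// 10 characters of code, 7 characters in memory, and 16 characters once
// re-encoded, so it contributes 10 - 7 = 3 to part 1 and 16 - 10 = 6 to part 2.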
fn main() {
measure(|| {
let input = input().expect("Input failed");
println!("Part1: {}", part1(&input));
println!("Part2: {}", part2(&input));
});
}
fn input() -> io::Result<Input> {
let f = File::open(env::args().skip(1).next().expect("No input file given"))?;
let f = BufReader::new(f);
Ok(f.lines().map(|l| l.unwrap().chars().collect()).collect())
}
#[cfg(test)]
mod tests {
use super::*;
const INPUT: &'static str =
r#"
""
"abc"
"aaa\"aaa"
"\x27"
"#;
fn as_input(s: &str) -> Input {
s.split('\n').map(|s| s.trim().into()).filter(|s: &String| s.len() > 0).map(|s| s.chars().collect()).collect()
}
#[test]
fn test_part1() {
assert_eq!(part1(&as_input(INPUT)), 12);
}
#[test]
fn test_part2() {
assert_eq!(part2(&as_input(INPUT)), 19);
}
} | 21.695652 | 118 | 0.446894 |
01ff73c1ff72c15a858ba7b6f2df22d5446c5a4b | 120 | mod descriptor;
mod arg_type;
mod value;
pub use arg_type::Type;
pub use value::Value;
pub use descriptor::Descriptor;
| 15 | 31 | 0.766667 |
dea07a45d872ee234869c05f5b37edd1d6067833 | 28,423 | // Copyright (c) 2021 The Vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Parsing and analysis utilities for SPIR-V shader binaries.
//!
//! This can be used to inspect and validate a SPIR-V module at runtime. The `Spirv` type does some
//! validation, but you should not assume that code that is read successfully is valid.
//!
//! For more information about SPIR-V modules, instructions and types, see the
//! [SPIR-V specification](https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html).
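//!
//! # Examples
//!
//! A minimal sketch of inspecting a parsed module (how the words are obtained
//! and the surrounding error handling are illustrative):
//!
//! ```ignore
//! let words: Vec<u32> = load_spirv_words(); // hypothetical loader
//! let spirv = Spirv::new(&words)?;
//! for instruction in spirv.iter_entry_point() {
//!     // each item is an `Instruction::EntryPoint`
//! }
//! ```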
use crate::Version;
use std::{
collections::HashMap,
error::Error,
fmt::{self, Display, Formatter},
ops::Range,
string::FromUtf8Error,
};
// Generated by build.rs
include!(concat!(env!("OUT_DIR"), "/spirv.rs"));
/// A parsed and analyzed SPIR-V module.
#[derive(Clone, Debug)]
pub struct Spirv {
version: Version,
bound: u32,
instructions: Vec<Instruction>,
ids: HashMap<Id, IdDataIndices>,
// Items described in the spec section "Logical Layout of a Module"
range_capability: Range<usize>,
range_extension: Range<usize>,
range_ext_inst_import: Range<usize>,
memory_model: usize,
range_entry_point: Range<usize>,
range_execution_mode: Range<usize>,
range_name: Range<usize>,
range_decoration: Range<usize>,
range_global: Range<usize>,
}
impl Spirv {
/// Parses a SPIR-V document from a list of words.
pub fn new(words: &[u32]) -> Result<Spirv, SpirvError> {
if words.len() < 5 {
return Err(SpirvError::InvalidHeader);
}
if words[0] != 0x07230203 {
return Err(SpirvError::InvalidHeader);
}
let version = Version {
major: (words[1] & 0x00ff0000) >> 16,
minor: (words[1] & 0x0000ff00) >> 8,
patch: words[1] & 0x000000ff,
};
let bound = words[3];
let instructions = {
let mut ret = Vec::new();
let mut rest = &words[5..];
while !rest.is_empty() {
let word_count = (rest[0] >> 16) as usize;
assert!(word_count >= 1);
if rest.len() < word_count {
return Err(ParseError {
instruction: ret.len(),
word: rest.len(),
error: ParseErrors::UnexpectedEOF,
words: rest.to_owned(),
}
.into());
}
let mut reader = InstructionReader::new(&rest[0..word_count], ret.len());
let instruction = Instruction::parse(&mut reader)?;
if !reader.is_empty() {
return Err(reader.map_err(ParseErrors::LeftoverOperands).into());
}
ret.push(instruction);
rest = &rest[word_count..];
}
ret
};
// It is impossible for a valid SPIR-V file to contain more Ids than instructions, so put
// a sane upper limit on the allocation. This prevents a malicious file from causing huge
// memory allocations.
let mut ids = HashMap::with_capacity(instructions.len().min(bound as usize));
let mut range_capability: Option<Range<usize>> = None;
let mut range_extension: Option<Range<usize>> = None;
let mut range_ext_inst_import: Option<Range<usize>> = None;
let mut range_memory_model: Option<Range<usize>> = None;
let mut range_entry_point: Option<Range<usize>> = None;
let mut range_execution_mode: Option<Range<usize>> = None;
let mut range_name: Option<Range<usize>> = None;
let mut range_decoration: Option<Range<usize>> = None;
let mut range_global: Option<Range<usize>> = None;
let mut in_function = false;
fn set_range(range: &mut Option<Range<usize>>, index: usize) -> Result<(), SpirvError> {
if let Some(range) = range {
if range.end != index {
return Err(SpirvError::BadLayout { index });
}
range.end = index + 1;
} else {
*range = Some(Range {
start: index,
end: index + 1,
});
}
Ok(())
}
for (index, instruction) in instructions.iter().enumerate() {
if let Some(id) = instruction.result_id() {
if u32::from(id) >= bound {
return Err(SpirvError::IdOutOfBounds { id, index, bound });
}
let members = if let Instruction::TypeStruct { member_types, .. } = instruction {
member_types
.iter()
.map(|_| StructMemberDataIndices::default())
.collect()
} else {
Vec::new()
};
let data = IdDataIndices {
index,
names: Vec::new(),
decorations: Vec::new(),
members,
};
if let Some(first) = ids.insert(id, data) {
return Err(SpirvError::DuplicateId {
id,
first_index: first.index,
second_index: index,
});
}
}
match instruction {
Instruction::Capability { .. } => set_range(&mut range_capability, index)?,
Instruction::Extension { .. } => set_range(&mut range_extension, index)?,
Instruction::ExtInstImport { .. } => set_range(&mut range_ext_inst_import, index)?,
Instruction::MemoryModel { .. } => set_range(&mut range_memory_model, index)?,
Instruction::EntryPoint { .. } => set_range(&mut range_entry_point, index)?,
Instruction::ExecutionMode { .. } | Instruction::ExecutionModeId { .. } => {
set_range(&mut range_execution_mode, index)?
}
Instruction::Name { .. } | Instruction::MemberName { .. } => {
set_range(&mut range_name, index)?
}
Instruction::Decorate { .. }
| Instruction::MemberDecorate { .. }
| Instruction::DecorationGroup { .. }
| Instruction::GroupDecorate { .. }
| Instruction::GroupMemberDecorate { .. }
| Instruction::DecorateId { .. }
| Instruction::DecorateString { .. }
| Instruction::MemberDecorateString { .. } => {
set_range(&mut range_decoration, index)?
}
Instruction::TypeVoid { .. }
| Instruction::TypeBool { .. }
| Instruction::TypeInt { .. }
| Instruction::TypeFloat { .. }
| Instruction::TypeVector { .. }
| Instruction::TypeMatrix { .. }
| Instruction::TypeImage { .. }
| Instruction::TypeSampler { .. }
| Instruction::TypeSampledImage { .. }
| Instruction::TypeArray { .. }
| Instruction::TypeRuntimeArray { .. }
| Instruction::TypeStruct { .. }
| Instruction::TypeOpaque { .. }
| Instruction::TypePointer { .. }
| Instruction::TypeFunction { .. }
| Instruction::TypeEvent { .. }
| Instruction::TypeDeviceEvent { .. }
| Instruction::TypeReserveId { .. }
| Instruction::TypeQueue { .. }
| Instruction::TypePipe { .. }
| Instruction::TypeForwardPointer { .. }
| Instruction::TypePipeStorage { .. }
| Instruction::TypeNamedBarrier { .. }
| Instruction::TypeRayQueryKHR { .. }
| Instruction::TypeAccelerationStructureKHR { .. }
| Instruction::TypeCooperativeMatrixNV { .. }
| Instruction::TypeVmeImageINTEL { .. }
| Instruction::TypeAvcImePayloadINTEL { .. }
| Instruction::TypeAvcRefPayloadINTEL { .. }
| Instruction::TypeAvcSicPayloadINTEL { .. }
| Instruction::TypeAvcMcePayloadINTEL { .. }
| Instruction::TypeAvcMceResultINTEL { .. }
| Instruction::TypeAvcImeResultINTEL { .. }
| Instruction::TypeAvcImeResultSingleReferenceStreamoutINTEL { .. }
| Instruction::TypeAvcImeResultDualReferenceStreamoutINTEL { .. }
| Instruction::TypeAvcImeSingleReferenceStreaminINTEL { .. }
| Instruction::TypeAvcImeDualReferenceStreaminINTEL { .. }
| Instruction::TypeAvcRefResultINTEL { .. }
| Instruction::TypeAvcSicResultINTEL { .. }
| Instruction::ConstantTrue { .. }
| Instruction::ConstantFalse { .. }
| Instruction::Constant { .. }
| Instruction::ConstantComposite { .. }
| Instruction::ConstantSampler { .. }
| Instruction::ConstantNull { .. }
| Instruction::ConstantPipeStorage { .. }
| Instruction::SpecConstantTrue { .. }
| Instruction::SpecConstantFalse { .. }
| Instruction::SpecConstant { .. }
| Instruction::SpecConstantComposite { .. }
| Instruction::SpecConstantOp { .. } => set_range(&mut range_global, index)?,
Instruction::Variable { storage_class, .. }
if *storage_class != StorageClass::Function =>
{
set_range(&mut range_global, index)?
}
Instruction::Function { .. } => {
in_function = true;
}
Instruction::Line { .. } | Instruction::NoLine { .. } => {
if !in_function {
set_range(&mut range_global, index)?
}
}
_ => (),
}
}
let mut spirv = Spirv {
version,
bound,
instructions,
ids,
range_capability: range_capability.unwrap_or_default(),
range_extension: range_extension.unwrap_or_default(),
range_ext_inst_import: range_ext_inst_import.unwrap_or_default(),
memory_model: if let Some(range) = range_memory_model {
if range.end - range.start != 1 {
return Err(SpirvError::MemoryModelInvalid);
}
range.start
} else {
return Err(SpirvError::MemoryModelInvalid);
},
range_entry_point: range_entry_point.unwrap_or_default(),
range_execution_mode: range_execution_mode.unwrap_or_default(),
range_name: range_name.unwrap_or_default(),
range_decoration: range_decoration.unwrap_or_default(),
range_global: range_global.unwrap_or_default(),
};
for index in spirv.range_name.clone() {
match &spirv.instructions[index] {
Instruction::Name { target, .. } => {
spirv.ids.get_mut(target).unwrap().names.push(index);
}
Instruction::MemberName { ty, member, .. } => {
spirv.ids.get_mut(ty).unwrap().members[*member as usize]
.names
.push(index);
}
_ => unreachable!(),
}
}
// First handle all regular decorations, including those targeting decoration groups.
for index in spirv.range_decoration.clone() {
match &spirv.instructions[index] {
Instruction::Decorate { target, .. }
| Instruction::DecorateId { target, .. }
| Instruction::DecorateString { target, .. } => {
spirv.ids.get_mut(target).unwrap().decorations.push(index);
}
Instruction::MemberDecorate {
structure_type: target,
member,
..
}
| Instruction::MemberDecorateString {
struct_type: target,
member,
..
} => {
spirv.ids.get_mut(target).unwrap().members[*member as usize]
.decorations
.push(index);
}
_ => (),
}
}
// Then, with decoration groups having their lists complete, handle group decorates.
for index in spirv.range_decoration.clone() {
match &spirv.instructions[index] {
Instruction::GroupDecorate {
decoration_group,
targets,
..
} => {
let indices = {
let data = &spirv.ids[decoration_group];
if !matches!(
spirv.instructions[data.index],
Instruction::DecorationGroup { .. }
) {
return Err(SpirvError::GroupDecorateNotGroup { index });
};
data.decorations.clone()
};
for target in targets {
spirv
.ids
.get_mut(target)
.unwrap()
.decorations
.extend(&indices);
}
}
Instruction::GroupMemberDecorate {
decoration_group,
targets,
..
} => {
let indices = {
let data = &spirv.ids[decoration_group];
if !matches!(
spirv.instructions[data.index],
Instruction::DecorationGroup { .. }
) {
return Err(SpirvError::GroupDecorateNotGroup { index });
};
data.decorations.clone()
};
for (target, member) in targets {
spirv.ids.get_mut(target).unwrap().members[*member as usize]
.decorations
.extend(&indices);
}
}
_ => (),
}
}
Ok(spirv)
}
/// Returns a reference to the instructions in the module.
#[inline]
pub fn instructions(&self) -> &[Instruction] {
&self.instructions
}
/// Returns the SPIR-V version that the module is compiled for.
#[inline]
pub fn version(&self) -> Version {
self.version
}
/// Returns the upper bound of `Id`s. All `Id`s should have a numeric value strictly less than
/// this value.
#[inline]
pub fn bound(&self) -> u32 {
self.bound
}
/// Returns information about an `Id`.
///
/// # Panics
///
/// - Panics if `id` is not defined in this module. This can in theory only happen if you are
/// mixing `Id`s from different modules.
#[inline]
pub fn id<'a>(&'a self, id: Id) -> IdInfo<'a> {
IdInfo {
data_indices: &self.ids[&id],
instructions: &self.instructions,
}
}
/// Returns an iterator over all `Capability` instructions.
#[inline]
pub fn iter_capability(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_capability.clone()].iter()
}
/// Returns an iterator over all `Extension` instructions.
#[inline]
pub fn iter_extension(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_extension.clone()].iter()
}
/// Returns an iterator over all `ExtInstImport` instructions.
#[inline]
pub fn iter_ext_inst_import(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_ext_inst_import.clone()].iter()
}
/// Returns the `MemoryModel` instruction.
#[inline]
pub fn memory_model(&self) -> &Instruction {
&self.instructions[self.memory_model]
}
/// Returns an iterator over all `EntryPoint` instructions.
#[inline]
pub fn iter_entry_point(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_entry_point.clone()].iter()
}
/// Returns an iterator over all execution mode instructions.
#[inline]
pub fn iter_execution_mode(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_execution_mode.clone()].iter()
}
/// Returns an iterator over all name debug instructions.
#[inline]
pub fn iter_name(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_name.clone()].iter()
}
/// Returns an iterator over all decoration instructions.
#[inline]
pub fn iter_decoration(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_decoration.clone()].iter()
}
/// Returns an iterator over all global declaration instructions: types,
/// constants and global variables.
///
/// Note: This can also include `Line` and `NoLine` instructions.
#[inline]
pub fn iter_global(&self) -> impl ExactSizeIterator<Item = &Instruction> {
self.instructions[self.range_global.clone()].iter()
}
}
#[derive(Clone, Debug)]
struct IdDataIndices {
index: usize,
names: Vec<usize>,
decorations: Vec<usize>,
members: Vec<StructMemberDataIndices>,
}
#[derive(Clone, Debug, Default)]
struct StructMemberDataIndices {
names: Vec<usize>,
decorations: Vec<usize>,
}
/// Information associated with an `Id`.
#[derive(Clone, Debug)]
pub struct IdInfo<'a> {
data_indices: &'a IdDataIndices,
instructions: &'a [Instruction],
}
impl<'a> IdInfo<'a> {
/// Returns the instruction that defines this `Id` with a `result_id` operand.
#[inline]
pub fn instruction(&self) -> &'a Instruction {
&self.instructions[self.data_indices.index]
}
/// Returns an iterator over all name debug instructions that target this `Id`.
#[inline]
pub fn iter_name(&self) -> impl ExactSizeIterator<Item = &'a Instruction> {
let instructions = self.instructions;
self.data_indices
.names
.iter()
.map(move |&index| &instructions[index])
}
/// Returns an iterator over all decorate instructions, that target this `Id`. This includes any
/// decorate instructions that target this `Id` indirectly via a `DecorationGroup`.
#[inline]
pub fn iter_decoration(&self) -> impl ExactSizeIterator<Item = &'a Instruction> {
let instructions = self.instructions;
self.data_indices
.decorations
.iter()
.map(move |&index| &instructions[index])
}
/// If this `Id` refers to a `TypeStruct`, returns an iterator of information about each member
/// of the struct. Empty otherwise.
#[inline]
pub fn iter_members(&self) -> impl ExactSizeIterator<Item = StructMemberInfo<'a>> {
let instructions = self.instructions;
self.data_indices
.members
.iter()
.map(move |data_indices| StructMemberInfo {
data_indices,
instructions,
})
}
}
/// Information associated with a member of a `TypeStruct` instruction.
#[derive(Clone, Debug)]
pub struct StructMemberInfo<'a> {
data_indices: &'a StructMemberDataIndices,
instructions: &'a [Instruction],
}
impl<'a> StructMemberInfo<'a> {
/// Returns an iterator over all name debug instructions that target this struct member.
#[inline]
pub fn iter_name(&self) -> impl ExactSizeIterator<Item = &'a Instruction> {
let instructions = self.instructions;
self.data_indices
.names
.iter()
.map(move |&index| &instructions[index])
}
/// Returns an iterator over all decorate instructions that target this struct member. This
/// includes any decorate instructions that target this member indirectly via a
/// `DecorationGroup`.
#[inline]
pub fn iter_decoration(&self) -> impl ExactSizeIterator<Item = &'a Instruction> {
let instructions = self.instructions;
self.data_indices
.decorations
.iter()
.map(move |&index| &instructions[index])
}
}
/// Used in SPIR-V to refer to the result of another instruction.
///
/// Ids are global across a module, and are always assigned by exactly one instruction.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Id(u32);
impl From<Id> for u32 {
#[inline]
fn from(id: Id) -> u32 {
id.0
}
}
impl Display for Id {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "%{}", self.0)
}
}
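// For example, `u32::from(id)` recovers the raw word, while `format!("{}", id)`
// renders `%<word>` (e.g. `%42`), matching SPIR-V disassembly conventions.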
/// Helper type for parsing the words of an instruction.
#[derive(Debug)]
struct InstructionReader<'a> {
words: &'a [u32],
next_word: usize,
instruction: usize,
}
impl<'a> InstructionReader<'a> {
/// Constructs a new reader from a slice of words for a single instruction, including the opcode
/// word. `instruction` is the number of the instruction currently being read, and is used for
/// error reporting.
#[inline]
fn new(words: &'a [u32], instruction: usize) -> Self {
debug_assert!(!words.is_empty());
Self {
words,
next_word: 0,
instruction,
}
}
/// Returns whether the reader has reached the end of the current instruction.
#[inline]
fn is_empty(&self) -> bool {
self.next_word >= self.words.len()
}
/// Converts the `ParseErrors` enum to the `ParseError` struct, adding contextual information.
#[inline]
fn map_err(&self, error: ParseErrors) -> ParseError {
ParseError {
instruction: self.instruction,
word: self.next_word - 1, // -1 because the word has already been read
error,
words: self.words.to_owned(),
}
}
/// Returns the next word in the sequence.
#[inline]
fn next_u32(&mut self) -> Result<u32, ParseError> {
let word = *self.words.get(self.next_word).ok_or(ParseError {
instruction: self.instruction,
word: self.next_word, // No -1 because we didn't advance yet
error: ParseErrors::MissingOperands,
words: self.words.to_owned(),
})?;
self.next_word += 1;
Ok(word)
}
/// Returns the next two words as a single `u64`.
#[inline]
fn next_u64(&mut self) -> Result<u64, ParseError> {
Ok(self.next_u32()? as u64 | (self.next_u32()? as u64) << 32)
}
/// Reads a nul-terminated string.
fn next_string(&mut self) -> Result<String, ParseError> {
let mut bytes = Vec::new();
loop {
let word = self.next_u32()?.to_le_bytes();
if let Some(nul) = word.iter().position(|&b| b == 0) {
bytes.extend(&word[0..nul]);
break;
} else {
bytes.extend(word);
}
}
String::from_utf8(bytes).map_err(|err| self.map_err(ParseErrors::FromUtf8Error(err)))
}
/// Reads all remaining words.
#[inline]
fn remainder(&mut self) -> Vec<u32> {
let vec = self.words[self.next_word..].to_owned();
self.next_word = self.words.len();
vec
}
}
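// Worked example of the word layout that `next_u64` and `next_string` rely on:
// SPIR-V packs string bytes little-endian into 32-bit words (NUL-terminated),
// and 64-bit literals low word first. Self-contained; for illustration only.
#[cfg(test)]
mod word_packing_example {
    #[test]
    fn nul_terminated_string_layout() {
        // "GLSL" plus its NUL terminator occupies two words.
        let words = [u32::from_le_bytes(*b"GLSL"), 0u32];
        let bytes: Vec<u8> = words.iter().flat_map(|w| w.to_le_bytes()).collect();
        let nul = bytes.iter().position(|&b| b == 0).unwrap();
        let expected: &[u8] = b"GLSL";
        assert_eq!(&bytes[..nul], expected);
    }
}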
/// Error that can happen when reading a SPIR-V module.
#[derive(Clone, Debug)]
pub enum SpirvError {
BadLayout {
index: usize,
},
DuplicateId {
id: Id,
first_index: usize,
second_index: usize,
},
GroupDecorateNotGroup {
index: usize,
},
IdOutOfBounds {
id: Id,
index: usize,
bound: u32,
},
InvalidHeader,
MemoryModelInvalid,
ParseError(ParseError),
}
impl Display for SpirvError {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::BadLayout { index } => write!(
f,
"the instruction at index {} does not follow the logical layout of a module",
index
),
Self::DuplicateId {
id,
first_index,
second_index,
} => write!(
f,
"id {} is assigned more than once, by instructions {} and {}",
id, first_index, second_index
),
Self::GroupDecorateNotGroup { index } => write!(f, "a GroupDecorate or GroupMemberDecorate instruction at index {} referred to an Id that was not a DecorationGroup", index),
Self::IdOutOfBounds { id, bound, index, } => write!(f, "id {}, assigned at instruction {}, is not below the maximum bound {}", id, index, bound),
Self::InvalidHeader => write!(f, "the SPIR-V module header is invalid"),
Self::MemoryModelInvalid => {
write!(f, "the MemoryModel instruction is not present exactly once")
}
Self::ParseError(_) => write!(f, "parse error"),
}
}
}
impl Error for SpirvError {
#[inline]
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::ParseError(err) => Some(err),
_ => None,
}
}
}
impl From<ParseError> for SpirvError {
#[inline]
fn from(err: ParseError) -> Self {
Self::ParseError(err)
}
}
/// Error that can happen when parsing SPIR-V instructions into Rust data structures.
#[derive(Clone, Debug)]
pub struct ParseError {
/// The instruction number the error happened at, starting from 0.
pub instruction: usize,
/// The word from the start of the instruction that the error happened at, starting from 0.
pub word: usize,
/// The error.
pub error: ParseErrors,
/// The words of the instruction.
pub words: Vec<u32>,
}
impl Display for ParseError {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"at instruction {}, word {}: {}",
self.instruction, self.word, self.error
)
}
}
impl Error for ParseError {}
/// Individual types of parse error that can happen.
#[derive(Clone, Debug)]
pub enum ParseErrors {
FromUtf8Error(FromUtf8Error),
LeftoverOperands,
MissingOperands,
UnexpectedEOF,
UnknownEnumerant(&'static str, u32),
UnknownOpcode(u16),
UnknownSpecConstantOpcode(u16),
}
impl Display for ParseErrors {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
            Self::FromUtf8Error(err) => write!(f, "invalid UTF-8 in string literal: {}", err),
Self::LeftoverOperands => write!(f, "unparsed operands remaining"),
Self::MissingOperands => write!(f, "the instruction and its operands require more words than are present in the instruction"),
Self::UnexpectedEOF => write!(f, "encountered unexpected end of file"),
Self::UnknownEnumerant(ty, enumerant) => write!(f, "invalid enumerant {} for enum {}", enumerant, ty),
Self::UnknownOpcode(opcode) => write!(f, "invalid instruction opcode {}", opcode),
Self::UnknownSpecConstantOpcode(opcode) => write!(f, "invalid spec constant instruction opcode {}", opcode),
}
}
}
| 36.300128 | 185 | 0.54083 |
bf8d27827fbea0d8fc4aebcf694c8d1365898300 | 10,377 | //! Represents AVM1 scope chain resolution.
use crate::avm1::activation::Activation;
use crate::avm1::error::Error;
use crate::avm1::{Object, ScriptObject, TObject, Value};
use enumset::EnumSet;
use gc_arena::{GcCell, MutationContext};
use std::cell::Ref;
/// Indicates what kind of scope a scope is.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ScopeClass {
/// Scope represents global scope.
Global,
/// Target represents timeline scope. All timeline actions execute with
/// the current clip object in lieu of a local scope, and the timeline scope
/// can be changed via `tellTarget`.
Target,
/// Scope represents local scope and is inherited when a closure is defined.
Local,
/// Scope represents an object added to the scope chain with `with`.
/// It is not inherited when closures are defined.
With,
}
/// Represents a scope chain for an AVM1 activation.
#[derive(Debug)]
pub struct Scope<'gc> {
parent: Option<GcCell<'gc, Scope<'gc>>>,
class: ScopeClass,
values: Object<'gc>,
}
unsafe impl<'gc> gc_arena::Collect for Scope<'gc> {
#[inline]
fn trace(&self, cc: gc_arena::CollectionContext) {
self.parent.trace(cc);
self.values.trace(cc);
}
}
impl<'gc> Scope<'gc> {
/// Construct a global scope (one without a parent).
pub fn from_global_object(globals: Object<'gc>) -> Scope<'gc> {
Scope {
parent: None,
class: ScopeClass::Global,
values: globals,
}
}
/// Construct a child scope of another scope.
pub fn new_local_scope(parent: GcCell<'gc, Self>, mc: MutationContext<'gc, '_>) -> Scope<'gc> {
Scope {
parent: Some(parent),
class: ScopeClass::Local,
values: ScriptObject::object_cell(mc, None),
}
}
/// Construct a closure scope to be used as the parent of all local scopes
/// when invoking a function.
///
/// This function filters With scopes from the scope chain. If all scopes
/// are filtered (somehow), this function constructs and returns a new,
/// single global scope with a bare object.
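    ///
    /// For example, a chain `With -> Local -> Target -> Global` yields a
    /// closure scope of `Local -> Target -> Global`, preserving the relative
    /// order of the remaining scopes.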
pub fn new_closure_scope(
mut parent: GcCell<'gc, Self>,
mc: MutationContext<'gc, '_>,
) -> GcCell<'gc, Self> {
let mut bottom_scope = None;
let mut top_scope: Option<GcCell<'gc, Self>> = None;
loop {
if parent.read().class != ScopeClass::With {
let next_scope = GcCell::allocate(
mc,
Self {
parent: None,
class: parent.read().class,
values: parent.read().values,
},
);
if bottom_scope.is_none() {
bottom_scope = Some(next_scope);
}
if let Some(ref scope) = top_scope {
scope.write(mc).parent = Some(next_scope);
}
top_scope = Some(next_scope);
}
let grandparent = parent.read().parent;
if let Some(grandparent) = grandparent {
parent = grandparent;
} else {
break;
}
}
bottom_scope.unwrap_or_else(|| {
GcCell::allocate(
mc,
Self {
parent: None,
class: ScopeClass::Global,
values: ScriptObject::object_cell(mc, None),
},
)
})
}
/// Construct a scope for use with `tellTarget` code where the timeline
/// scope has been replaced with another given object.
pub fn new_target_scope(
mut parent: GcCell<'gc, Self>,
clip: Object<'gc>,
mc: MutationContext<'gc, '_>,
) -> GcCell<'gc, Self> {
let mut bottom_scope = None;
let mut top_scope: Option<GcCell<'gc, Self>> = None;
loop {
let next_scope = GcCell::allocate(
mc,
Self {
parent: None,
class: parent.read().class,
values: parent.read().values,
},
);
if parent.read().class == ScopeClass::Target {
next_scope.write(mc).values = clip;
}
if bottom_scope.is_none() {
bottom_scope = Some(next_scope);
}
if let Some(ref scope) = top_scope {
scope.write(mc).parent = Some(next_scope);
}
top_scope = Some(next_scope);
let grandparent = parent.read().parent;
if let Some(grandparent) = grandparent {
parent = grandparent;
} else {
break;
}
}
bottom_scope.unwrap_or_else(|| {
GcCell::allocate(
mc,
Self {
parent: None,
class: ScopeClass::Global,
values: ScriptObject::object_cell(mc, None),
},
)
})
}
/// Construct a with scope to be used as the scope during a with block.
///
/// A with block adds an object to the top of the scope chain, so unqualified
/// references will try to resolve on that object first.
pub fn new_with_scope(
parent_scope: GcCell<'gc, Self>,
with_object: Object<'gc>,
mc: MutationContext<'gc, '_>,
) -> GcCell<'gc, Self> {
GcCell::allocate(
mc,
Scope {
parent: Some(parent_scope),
class: ScopeClass::With,
values: with_object,
},
)
}
/// Construct an arbitrary scope
pub fn new(
parent: GcCell<'gc, Self>,
class: ScopeClass,
with_object: Object<'gc>,
) -> Scope<'gc> {
Scope {
parent: Some(parent),
class,
values: with_object,
}
}
/// Returns a reference to the current local scope object.
pub fn locals(&self) -> &Object<'gc> {
&self.values
}
/// Returns a reference to the current local scope object.
pub fn locals_cell(&self) -> Object<'gc> {
self.values
}
/// Returns a reference to the current local scope object for mutation.
#[allow(dead_code)]
pub fn locals_mut(&mut self) -> &mut Object<'gc> {
&mut self.values
}
/// Returns a reference to the parent scope object.
pub fn parent(&self) -> Option<Ref<Scope<'gc>>> {
match self.parent {
Some(ref p) => Some(p.read()),
None => None,
}
}
/// Returns a reference to the parent scope object.
pub fn parent_cell(&self) -> Option<GcCell<'gc, Scope<'gc>>> {
self.parent
}
/// Resolve a particular value in the scope chain.
///
/// Because scopes are object chains, the same rules for `Object::get`
/// still apply here. This function is allowed to yield `None` to indicate
/// that the result will be calculated on the AVM stack.
pub fn resolve(
&self,
name: &str,
activation: &mut Activation<'_, 'gc, '_>,
this: Object<'gc>,
) -> Result<Value<'gc>, Error<'gc>> {
if self.locals().has_property(activation, name) {
return self.locals().get(name, activation);
}
if let Some(scope) = self.parent() {
return scope.resolve(name, activation, this);
}
//TODO: Should undefined variables halt execution?
Ok(Value::Undefined)
}
/// Check if a particular property in the scope chain is defined.
pub fn is_defined(&self, activation: &mut Activation<'_, 'gc, '_>, name: &str) -> bool {
if self.locals().has_property(activation, name) {
return true;
}
if let Some(scope) = self.parent() {
return scope.is_defined(activation, name);
}
false
}
/// Update a particular value in the scope chain.
///
/// Traverses the scope chain in search of a value. If it's found, it's overwritten.
/// The traversal stops at Target scopes, which represents the movie clip timeline
/// the code is executing in.
/// If the value is not found, it is defined on this Target scope.
pub fn set(
&self,
name: &str,
value: Value<'gc>,
activation: &mut Activation<'_, 'gc, '_>,
this: Object<'gc>,
) -> Result<(), Error<'gc>> {
if self.class == ScopeClass::Target || self.locals().has_property(activation, name) {
// Value found on this object, so overwrite it.
// Or we've hit the executing movie clip, so create it here.
self.locals().set(name, value, activation)
} else if let Some(scope) = self.parent() {
// Traverse the scope chain in search of the value.
scope.set(name, value, activation, this)
} else {
// This probably shouldn't happen -- all AVM1 code runs in reference to some movieclip,
// so we should always have a movieclip scope.
// Define on the top-level scope.
debug_assert!(false, "Scope::set: No top-level movie clip scope");
self.locals().set(name, value, activation)
}
}
/// Set a particular value in the locals for this scope.
///
/// By convention, the locals for a given function are always defined as
/// stored (e.g. not virtual) properties on the lowest object in the scope
/// chain. As a result, this function always force sets a property on the
/// local object and does not traverse the scope chain.
pub fn define(&self, name: &str, value: impl Into<Value<'gc>>, mc: MutationContext<'gc, '_>) {
self.locals()
.define_value(mc, name, value.into(), EnumSet::empty());
}
/// Delete a value from scope
pub fn delete(&self, activation: &mut Activation<'_, 'gc, '_>, name: &str) -> bool {
if self.locals().has_property(activation, name) {
return self.locals().delete(activation, name);
}
if let Some(scope) = self.parent() {
return scope.delete(activation, name);
}
false
}
}
| 32.226708 | 99 | 0.546304 |
9c58abbd3822a8860399da95f33999c30efc9a47 | 654 | mod image_util;
use image_util::{DogImage, AppState};
//TODO: More refactoring
use druid::{AppLauncher, WindowDesc};
use std::path::Path;
use std::fs::create_dir;
fn main() {
let dogs_save_dir = Path::new("Dogs/");
if !dogs_save_dir.exists() {
create_dir(dogs_save_dir).expect("cant create saving dir");
}
let main_window = WindowDesc::new(image_util::make_ui)
.window_size((650., 450.))
.title("Random Dog on druid");
let state = AppState {
breed: String::from("any"),
dog_image: DogImage::new(&String::from("any")),
};
AppLauncher::with_window(main_window)
.launch(state)
.expect("Faild to launc app");
}
| 23.357143 | 63 | 0.669725 |
393ce196bd82855e24b77097cde4d9e87b105a6d | 28,809 | use std::io::{Error, ErrorKind};
use std::str::FromStr;
use aws_config::provider_config::ProviderConfig;
use aws_sdk_s3::model::{
BucketLocationConstraint, CreateBucketConfiguration, Delete, Object, ObjectIdentifier,
};
use aws_sdk_s3::types::ByteStream;
use aws_sdk_s3::{Client, Endpoint as SdkEndpoint};
use aws_types::os_shim_internal::Env;
use chrono::{Duration, Utc};
use log::{error, info};
use crate::cli::DumpDeleteArgs;
use crate::config::Endpoint;
use crate::connector::Connector;
use crate::datastore::s3::S3Error::FailedObjectUpload;
use crate::datastore::{
compress, decompress, decrypt, encrypt, Backup, Datastore, IndexFile, ReadOptions,
};
use crate::runtime::block_on;
use crate::types::Bytes;
use crate::utils::epoch_millis;
const INDEX_FILE_NAME: &str = "metadata.json";
pub struct S3 {
bucket: String,
root_key: String,
region: String,
client: Client,
enable_compression: bool,
encryption_key: Option<String>,
}
impl S3 {
pub fn new<S: Into<String>>(
bucket: S,
region: S,
access_key_id: S,
secret_access_key: S,
endpoint: Endpoint,
) -> Self {
let access_key_id = access_key_id.into();
let secret_access_key = secret_access_key.into();
let region = region.into();
let sdk_config = block_on(
aws_config::from_env()
.configure(ProviderConfig::default().with_env(Env::from_slice(&[
("AWS_ACCESS_KEY_ID", access_key_id.as_str()),
("AWS_SECRET_ACCESS_KEY", secret_access_key.as_str()),
("AWS_REGION", region.as_str()),
])))
.load(),
);
let s3_config_builder = aws_sdk_s3::config::Builder::from(&sdk_config);
let s3_config = match endpoint {
Endpoint::Default => s3_config_builder.build(),
Endpoint::Custom(url) => match http::Uri::from_str(url.as_str()) {
Ok(uri) => s3_config_builder
.endpoint_resolver(SdkEndpoint::immutable(uri))
.build(),
Err(_) => s3_config_builder.build(),
},
};
S3 {
            bucket: bucket.into(),
root_key: format!("backup-{}", epoch_millis()),
region,
client: Client::from_conf(s3_config),
enable_compression: true,
encryption_key: None,
}
}
fn create_index_file(&self) -> Result<IndexFile, Error> {
match self.index_file() {
Ok(index_file) => Ok(index_file),
Err(_) => {
let index_file = IndexFile { backups: vec![] };
let _ = self.write_index_file(&index_file)?;
Ok(index_file)
}
}
}
}
impl Connector for S3 {
fn init(&mut self) -> Result<(), Error> {
let _ = create_bucket(&self.client, self.bucket.as_str(), self.region.as_str())?;
self.create_index_file().map(|_| ())
}
}
impl Datastore for S3 {
fn index_file(&self) -> Result<IndexFile, Error> {
let object = get_object(&self.client, self.bucket.as_str(), INDEX_FILE_NAME)?;
let index_file: IndexFile = serde_json::from_slice(object.as_slice())?;
Ok(index_file)
}
fn write_index_file(&self, index_file: &IndexFile) -> Result<(), Error> {
let index_file_json = serde_json::to_vec(index_file)?;
create_object(
&self.client,
self.bucket.as_str(),
INDEX_FILE_NAME,
index_file_json,
)
        .map_err(Error::from)
}
fn write(&self, file_part: u16, data: Bytes) -> Result<(), Error> {
// compress data?
let data = if self.enable_compression {
compress(data)?
} else {
data
};
// encrypt data?
let data = match &self.encryption_key {
Some(key) => encrypt(data, key.as_str())?,
None => data,
};
let data_size = data.len();
let key = format!("{}/{}.dump", self.root_key.as_str(), file_part);
info!("upload object '{}' part {} on", key.as_str(), file_part);
let _ = create_object(&self.client, self.bucket.as_str(), key.as_str(), data)?;
// update index file
let mut index_file = self.index_file()?;
let mut new_backup = Backup {
directory_name: self.root_key.clone(),
size: 0,
created_at: epoch_millis(),
compressed: self.enable_compression,
encrypted: self.encryption_key.is_some(),
};
// find or create Backup
        let backup = index_file
.backups
.iter_mut()
.find(|b| b.directory_name.as_str() == self.root_key.as_str())
.unwrap_or(&mut new_backup);
if backup.size == 0 {
// it means it's a new backup.
// We need to add it into the index_file.backups
new_backup.size = data_size;
index_file.backups.push(new_backup);
} else {
// update total backup size
            backup.size += data_size;
}
// save index file
self.write_index_file(&index_file)
}
fn read<'a, F>(&self, options: &ReadOptions, mut data_callback: F) -> Result<(), Error>
where
F: FnMut(Bytes),
{
let mut index_file = self.index_file()?;
let backup = index_file.find_backup(options)?;
for object in list_objects(
&self.client,
self.bucket.as_str(),
Some(backup.directory_name.as_str()),
)? {
let data = get_object(&self.client, self.bucket.as_str(), object.key().unwrap())?;
// decrypt data?
let data = if backup.encrypted {
// It should be safe to unwrap here because the backup is marked as encrypted in the backup manifest
// so if there is no encryption key set at the datastore level we want to panic.
let encryption_key = self.encryption_key.as_ref().unwrap();
decrypt(data, encryption_key.as_str())?
} else {
data
};
// decompress data?
let data = if backup.compressed {
decompress(data)?
} else {
data
};
data_callback(data);
}
Ok(())
}
fn set_encryption_key(&mut self, key: String) {
self.encryption_key = Some(key);
}
fn set_compression(&mut self, enable: bool) {
self.enable_compression = enable;
}
fn set_backup_name(&mut self, name: String) {
self.root_key = name;
}
fn delete(&self, args: &DumpDeleteArgs) -> Result<(), Error> {
if let Some(backup_name) = &args.dump {
return delete_by_name(&self, backup_name.as_str());
}
if let Some(older_than) = &args.older_than {
let days = match older_than.chars().nth_back(0) {
Some('d') => {
// remove the last character which corresponds to the unit
let mut older_than = older_than.to_string();
older_than.pop();
match older_than.parse::<i64>() {
Ok(days) => days,
Err(err) => return Err(Error::new(
ErrorKind::Other,
format!("command error: {} - invalid `--older-than` format. Use `--older-than=14d`", err),
)),
}
}
_ => {
return Err(Error::new(
ErrorKind::Other,
"command error: invalid `--older-than` format. Use `--older-than=14d`",
))
}
};
return delete_older_than(&self, days);
}
if let Some(keep_last) = args.keep_last {
return delete_keep_last(&self, keep_last);
}
Err(Error::new(
ErrorKind::Other,
"command error: parameters or options required",
))
}
}
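// A minimal usage sketch (bucket, region and credentials are placeholders, and
// `Bytes` is assumed to be the crate's byte-vector alias as used above; with
// `Endpoint::Default` this targets AWS itself rather than a MinIO-compatible
// endpoint):
#[allow(dead_code)]
fn example_s3_roundtrip() -> Result<(), Error> {
    let mut datastore = S3::new(
        "my-backups",
        "us-east-1",
        "<access-key-id>",
        "<secret-access-key>",
        Endpoint::Default,
    );
    // Creates the bucket (if needed) and the metadata.json index file.
    datastore.init()?;
    datastore.set_compression(true);
    // Uploads `<root_key>/1.dump` and records the part in the index file.
    datastore.write(1, b"dump part 1".to_vec())?;
    Ok(())
}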
fn delete_older_than(datastore: &S3, days: i64) -> Result<(), Error> {
let index_file = datastore.index_file()?;
let threshold_date = Utc::now() - Duration::days(days);
let threshold_date = threshold_date.timestamp_millis() as u128;
let backups_to_delete: Vec<Backup> = index_file
.backups
.into_iter()
.filter(|b| b.created_at.lt(&threshold_date))
.collect();
for backup in backups_to_delete {
delete_by_name(&datastore, backup.directory_name.as_str())?
}
Ok(())
}
fn delete_keep_last(datastore: &S3, keep_last: usize) -> Result<(), Error> {
let mut index_file = datastore.index_file()?;
index_file
.backups
.sort_by(|a, b| b.created_at.cmp(&a.created_at));
if let Some(backups) = index_file.backups.get(keep_last..) {
for backup in backups {
delete_by_name(&datastore, backup.directory_name.as_str())?;
}
}
Ok(())
}
fn delete_by_name(datastore: &S3, backup_name: &str) -> Result<(), Error> {
let mut index_file = datastore.index_file()?;
let bucket = &datastore.bucket;
    let _ = delete_directory(&datastore.client, bucket, backup_name).map_err(Error::from)?;
index_file
.backups
.retain(|b| b.directory_name != backup_name);
datastore.write_index_file(&index_file)
}
#[derive(Debug, Eq, PartialEq)]
enum S3Error<'a> {
FailedToCreateBucket { bucket: &'a str },
FailedToDeleteBucket { bucket: &'a str },
FailedToListObjects { bucket: &'a str },
ObjectDoesNotExist { bucket: &'a str, key: &'a str },
FailedObjectDownload { bucket: &'a str, key: &'a str },
FailedObjectUpload { bucket: &'a str, key: &'a str },
FailedToDeleteObject { bucket: &'a str, key: &'a str },
FailedToDeleteDirectory { bucket: &'a str, directory: &'a str },
}
impl<'a> From<S3Error<'a>> for Error {
fn from(err: S3Error<'a>) -> Self {
match err {
S3Error::FailedToCreateBucket { bucket } => Error::new(
ErrorKind::Other,
format!("failed to create bucket '{}'", bucket),
),
S3Error::FailedToDeleteBucket { bucket } => Error::new(
ErrorKind::Other,
format!("failed to delete bucket '{}'", bucket),
),
S3Error::FailedToListObjects { bucket } => Error::new(
ErrorKind::Other,
format!("failed to list objects from bucket '{}'", bucket),
),
S3Error::ObjectDoesNotExist {
bucket,
key: object,
} => Error::new(
ErrorKind::Other,
format!("object '{}/{}' does not exist", bucket, object),
),
S3Error::FailedObjectDownload {
bucket,
key: object,
} => Error::new(
ErrorKind::Other,
format!("failed to download object '{}/{}'", bucket, object),
),
FailedObjectUpload {
bucket,
key: object,
} => Error::new(
ErrorKind::Other,
format!("failed to upload object '{}/{}'", bucket, object),
),
S3Error::FailedToDeleteObject {
bucket,
key: object,
} => Error::new(
ErrorKind::Other,
format!("failed to delete object '{}/{}'", bucket, object),
),
S3Error::FailedToDeleteDirectory { bucket, directory } => Error::new(
ErrorKind::Other,
format!("failed to delete directory '{}/{}'", bucket, directory),
),
}
}
}
fn create_bucket<'a>(client: &Client, bucket: &'a str, region: &str) -> Result<(), S3Error<'a>> {
let constraint = BucketLocationConstraint::from(region);
let cfg = CreateBucketConfiguration::builder()
.location_constraint(constraint)
.build();
if let Ok(_) = block_on(
client
.get_bucket_accelerate_configuration()
.bucket(bucket)
.send(),
) {
info!("bucket {} exists", bucket);
return Ok(());
}
let result = block_on(
client
.create_bucket()
.create_bucket_configuration(cfg)
.bucket(bucket)
.send(),
);
match result {
Ok(_) => {}
Err(err) => {
error!("{}", err);
return Err(S3Error::FailedToCreateBucket { bucket });
}
}
info!("bucket {} created", bucket);
Ok(())
}
fn delete_bucket<'a>(client: &Client, bucket: &'a str, force: bool) -> Result<(), S3Error<'a>> {
if force {
for object in list_objects(client, bucket, None)? {
let _ = delete_object(client, bucket, object.key().unwrap_or(""));
}
}
let result = block_on(client.delete_bucket().bucket(bucket).send());
match result {
Ok(_) => {}
Err(err) => {
error!("{}", err);
return Err(S3Error::FailedToDeleteBucket { bucket });
}
}
info!("bucket {} created", bucket);
Ok(())
}
fn create_object<'a>(
client: &Client,
bucket: &'a str,
key: &'a str,
object: Vec<u8>,
) -> Result<(), S3Error<'a>> {
let result = block_on(
client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from(object))
// TODO: set metadata etag to validate upload on the S3 side
.send(),
);
if let Err(_) = result {
return Err(S3Error::FailedObjectUpload { bucket, key });
}
Ok(())
}
fn get_object<'a>(client: &Client, bucket: &'a str, key: &'a str) -> Result<Vec<u8>, S3Error<'a>> {
let result = block_on(client.get_object().bucket(bucket).key(key).send());
match result {
Ok(file) => match block_on(file.body.collect()) {
Ok(data) => Ok(data.into_bytes().to_vec()),
Err(_) => Err(S3Error::FailedObjectDownload { bucket, key }),
},
Err(_) => Err(S3Error::ObjectDoesNotExist { bucket, key }),
}
}
fn list_objects<'a>(
client: &Client,
bucket: &'a str,
path: Option<&'a str>,
) -> Result<Vec<Object>, S3Error<'a>> {
let objects = block_on(client.list_objects_v2().bucket(bucket).send());
let objects = match objects {
Ok(objects) => objects,
Err(err) => {
error!("{}", err);
return Err(S3Error::FailedToListObjects { bucket });
}
};
// FIXME max objects listed is 1000 -> pagination?
let objects = objects.contents.unwrap_or(Vec::new());
if path.is_none() {
return Ok(objects);
}
let path = path.unwrap();
let mut objects = objects
.into_iter()
.filter(|object| match object.key() {
Some(key) => key.starts_with(path),
None => false,
})
.collect::<Vec<_>>();
// sort by key
objects.sort_by(|a, b| a.key.cmp(&b.key));
Ok(objects)
}
fn delete_object<'a>(client: &Client, bucket: &'a str, key: &'a str) -> Result<(), S3Error<'a>> {
let _ = get_object(client, bucket, key)?;
let result = block_on(client.delete_object().bucket(bucket).key(key).send());
match result {
Ok(_) => Ok(()),
Err(_) => Err(S3Error::FailedToDeleteObject { bucket, key }),
}
}
fn delete_directory<'a>(
client: &Client,
bucket: &'a str,
directory: &'a str,
) -> Result<(), S3Error<'a>> {
if let Ok(objects) = block_on(
client
.list_objects_v2()
.bucket(bucket)
.prefix(directory)
.send(),
) {
let mut delete_objects: Vec<ObjectIdentifier> = vec![];
for obj in objects.contents().unwrap_or_default() {
let obj_id = ObjectIdentifier::builder()
.set_key(Some(obj.key().unwrap().to_string()))
.build();
delete_objects.push(obj_id);
}
match block_on(
client
.delete_objects()
.bucket(bucket)
.delete(Delete::builder().set_objects(Some(delete_objects)).build())
.send(),
) {
Ok(_) => Ok(()),
Err(err) => {
eprintln!("{}", err);
Err(S3Error::FailedToDeleteDirectory { bucket, directory })
}
}
} else {
Err(S3Error::FailedToListObjects { bucket })
}
}
#[cfg(test)]
mod tests {
use chrono::{Duration, Utc};
use fake::{Fake, Faker};
use crate::cli::DumpDeleteArgs;
use crate::config::Endpoint;
use crate::connector::Connector;
use crate::datastore::s3::{create_object, delete_bucket, delete_object, get_object, S3Error};
use crate::datastore::{Backup, Datastore};
use crate::utils::epoch_millis;
use crate::S3;
const BUCKET_NAME: &str = "replibyte-test";
const REGION: &str = "us-east-2";
const MINIO_ENDPOINT: &str = "http://localhost:9000";
const MINIO_CREDENTIALS: &str = "minioadmin";
fn bucket() -> String {
format!("replibyte-test-{}", Faker.fake::<String>().to_lowercase())
}
fn credentials() -> (String, String) {
(
std::env::var("AWS_ACCESS_KEY_ID").unwrap_or(MINIO_CREDENTIALS.to_string()),
std::env::var("AWS_SECRET_ACCESS_KEY").unwrap_or(MINIO_CREDENTIALS.to_string()),
)
}
fn s3(bucket: &str) -> S3 {
let (access_key_id, secret_access_key) = credentials();
S3::new(
bucket.to_string(),
"us-east-2".to_string(),
access_key_id,
secret_access_key,
Endpoint::Custom(MINIO_ENDPOINT.to_string()),
)
}
#[test]
fn init_s3() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
// executed twice to check that there is no error at the second call
assert!(s3.init().is_ok());
assert!(s3.init().is_ok());
assert!(delete_bucket(&s3.client, bucket.as_str(), true).is_ok());
}
#[test]
fn create_and_get_and_delete_object() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
let _ = s3.init().expect("s3 init failed");
let key = format!("testing-object-{}", Faker.fake::<String>());
assert_eq!(
get_object(&s3.client, bucket.as_str(), key.as_str())
.err()
.unwrap(),
S3Error::ObjectDoesNotExist {
bucket: bucket.as_str(),
key: key.as_str(),
}
);
assert!(create_object(
&s3.client,
bucket.as_str(),
key.as_str(),
b"hello w0rld".to_vec(),
)
.is_ok());
assert_eq!(
get_object(&s3.client, bucket.as_str(), key.as_str()).unwrap(),
b"hello w0rld"
);
// check that the object is updated
assert!(create_object(
&s3.client,
bucket.as_str(),
key.as_str(),
b"hello w0rld updated".to_vec(),
)
.is_ok());
assert_eq!(
get_object(&s3.client, bucket.as_str(), key.as_str()).unwrap(),
b"hello w0rld updated"
);
assert!(delete_object(&s3.client, bucket.as_str(), key.as_str()).is_ok());
assert_eq!(
delete_object(&s3.client, bucket.as_str(), key.as_str())
.err()
.unwrap(),
S3Error::ObjectDoesNotExist {
bucket: bucket.as_str(),
key: key.as_str(),
}
);
assert_eq!(
get_object(&s3.client, bucket.as_str(), key.as_str())
.err()
.unwrap(),
S3Error::ObjectDoesNotExist {
bucket: bucket.as_str(),
key: key.as_str(),
}
);
assert!(delete_bucket(&s3.client, bucket.as_str(), true).is_ok());
}
#[test]
fn test_s3_index_file() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
let _ = s3.init().expect("s3 init failed");
assert!(s3.index_file().is_ok());
let mut index_file = s3.index_file().unwrap();
assert!(index_file.backups.is_empty());
index_file.backups.push(Backup {
directory_name: "backup-1".to_string(),
size: 0,
created_at: epoch_millis(),
compressed: true,
encrypted: false,
});
assert!(s3.write_index_file(&index_file).is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 1);
assert!(delete_bucket(&s3.client, bucket.as_str(), true).is_ok());
}
#[test]
fn test_backup_name() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
s3.set_backup_name("custom-backup-name".to_string());
assert_eq!(s3.root_key, "custom-backup-name".to_string())
}
#[test]
fn test_s3_backup_delete_by_name() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
let _ = s3.init().expect("s3 init failed");
assert!(s3.index_file().is_ok());
let mut index_file = s3.index_file().unwrap();
assert!(index_file.backups.is_empty());
// Add 2 backups in the manifest
index_file.backups.push(Backup {
directory_name: "backup-1".to_string(),
size: 0,
created_at: epoch_millis(),
compressed: true,
encrypted: false,
});
index_file.backups.push(Backup {
directory_name: "backup-2".to_string(),
size: 0,
created_at: epoch_millis(),
compressed: true,
encrypted: false,
});
assert!(s3.write_index_file(&index_file).is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 2);
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-1/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-2/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: Some("backup-1".to_string()),
older_than: None,
keep_last: None
})
.is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 1);
assert!(get_object(&s3.client, bucket.as_str(), "backup-1/testing-key.dump").is_err());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: Some("backup-2".to_string()),
older_than: None,
keep_last: None
})
.is_ok());
assert!(s3.index_file().unwrap().backups.is_empty());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_err());
}
#[test]
fn test_s3_backup_delete_older_than() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
let _ = s3.init().expect("s3 init failed");
assert!(s3.index_file().is_ok());
let mut index_file = s3.index_file().unwrap();
assert!(index_file.backups.is_empty());
// Add a backup from 5 days ago
index_file.backups.push(Backup {
directory_name: "backup-1".to_string(),
size: 0,
created_at: (Utc::now() - Duration::days(5)).timestamp_millis() as u128,
compressed: true,
encrypted: false,
});
// Add a backup from now
index_file.backups.push(Backup {
directory_name: "backup-2".to_string(),
size: 0,
created_at: epoch_millis(),
compressed: true,
encrypted: false,
});
assert!(s3.write_index_file(&index_file).is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 2);
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-1/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-2/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: None,
older_than: Some("6d".to_string()),
keep_last: None
})
.is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 2);
assert!(get_object(&s3.client, bucket.as_str(), "backup-1/testing-key.dump").is_ok());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: None,
older_than: Some("5d".to_string()),
keep_last: None
})
.is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 1);
assert!(get_object(&s3.client, bucket.as_str(), "backup-1/testing-key.dump").is_err());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_ok());
}
#[test]
fn test_s3_backup_keep_last() {
let bucket = bucket();
let mut s3 = s3(bucket.as_str());
let _ = s3.init().expect("s3 init failed");
assert!(s3.index_file().is_ok());
let mut index_file = s3.index_file().unwrap();
assert!(index_file.backups.is_empty());
index_file.backups.push(Backup {
directory_name: "backup-1".to_string(),
size: 0,
created_at: (Utc::now() - Duration::days(3)).timestamp_millis() as u128,
compressed: true,
encrypted: false,
});
index_file.backups.push(Backup {
directory_name: "backup-2".to_string(),
size: 0,
created_at: (Utc::now() - Duration::days(5)).timestamp_millis() as u128,
compressed: true,
encrypted: false,
});
index_file.backups.push(Backup {
directory_name: "backup-3".to_string(),
size: 0,
created_at: epoch_millis(),
compressed: true,
encrypted: false,
});
assert!(s3.write_index_file(&index_file).is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 3);
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-1/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-2/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(create_object(
&s3.client,
bucket.as_str(),
"backup-3/testing-key.dump",
b"hello w0rld".to_vec(),
)
.is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: None,
older_than: None,
keep_last: Some(2)
})
.is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 2);
assert!(get_object(&s3.client, bucket.as_str(), "backup-1/testing-key.dump").is_ok());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_err());
assert!(get_object(&s3.client, bucket.as_str(), "backup-3/testing-key.dump").is_ok());
assert!(s3
.delete(&DumpDeleteArgs {
dump: None,
older_than: None,
keep_last: Some(1)
})
.is_ok());
assert_eq!(s3.index_file().unwrap().backups.len(), 1);
assert!(get_object(&s3.client, bucket.as_str(), "backup-1/testing-key.dump").is_err());
assert!(get_object(&s3.client, bucket.as_str(), "backup-2/testing-key.dump").is_err());
assert!(get_object(&s3.client, bucket.as_str(), "backup-3/testing-key.dump").is_ok());
}
}
| 30.261555 | 118 | 0.527856 |
26f78b9c5c78441b9089b2942fe1e8a34196120e | 14,620 | //! The main parser interface.
use crate::ast::{self, CrateConfig, NodeId};
use crate::early_buffered_lints::{BufferedEarlyLint, BufferedEarlyLintId};
use crate::source_map::{SourceMap, FilePathMapping};
use crate::feature_gate::UnstableFeatures;
use crate::parse::parser::Parser;
use crate::parse::parser::emit_unclosed_delims;
use crate::parse::token::TokenKind;
use crate::tokenstream::{TokenStream, TokenTree};
use crate::diagnostics::plugin::ErrorMap;
use crate::print::pprust;
use crate::symbol::Symbol;
use errors::{Applicability, FatalError, Level, Handler, ColorConfig, Diagnostic, DiagnosticBuilder};
use rustc_data_structures::sync::{Lrc, Lock, Once};
use syntax_pos::{Span, SourceFile, FileName, MultiSpan};
use syntax_pos::edition::Edition;
use syntax_pos::hygiene::ExpnId;
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use std::str;
#[cfg(test)]
mod tests;
#[macro_use]
pub mod parser;
pub mod attr;
pub mod lexer;
pub mod token;
crate mod classify;
crate mod diagnostics;
crate mod literal;
crate mod unescape_error_reporting;
pub type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
/// Info about a parsing session.
pub struct ParseSess {
pub span_diagnostic: Handler,
pub unstable_features: UnstableFeatures,
pub config: CrateConfig,
pub edition: Edition,
pub missing_fragment_specifiers: Lock<FxHashSet<Span>>,
/// Places where raw identifiers were used. This is used for feature-gating raw identifiers.
pub raw_identifier_spans: Lock<Vec<Span>>,
/// The registered diagnostics codes.
crate registered_diagnostics: Lock<ErrorMap>,
/// Used to determine and report recursive module inclusions.
included_mod_stack: Lock<Vec<PathBuf>>,
source_map: Lrc<SourceMap>,
pub buffered_lints: Lock<Vec<BufferedEarlyLint>>,
/// Contains the spans of block expressions that could have been incomplete based on the
/// operation token that followed it, but that the parser cannot identify without further
/// analysis.
pub ambiguous_block_expr_parse: Lock<FxHashMap<Span, Span>>,
pub param_attr_spans: Lock<Vec<Span>>,
// Places where `let` exprs were used and should be feature gated according to `let_chains`.
pub let_chains_spans: Lock<Vec<Span>>,
// Places where `async || ..` exprs were used and should be feature gated.
pub async_closure_spans: Lock<Vec<Span>>,
pub injected_crate_name: Once<Symbol>,
}
impl ParseSess {
pub fn new(file_path_mapping: FilePathMapping) -> Self {
let cm = Lrc::new(SourceMap::new(file_path_mapping));
let handler = Handler::with_tty_emitter(ColorConfig::Auto,
true,
None,
Some(cm.clone()));
ParseSess::with_span_handler(handler, cm)
}
pub fn with_span_handler(handler: Handler, source_map: Lrc<SourceMap>) -> ParseSess {
ParseSess {
span_diagnostic: handler,
unstable_features: UnstableFeatures::from_environment(),
config: FxHashSet::default(),
missing_fragment_specifiers: Lock::new(FxHashSet::default()),
raw_identifier_spans: Lock::new(Vec::new()),
registered_diagnostics: Lock::new(ErrorMap::new()),
included_mod_stack: Lock::new(vec![]),
source_map,
buffered_lints: Lock::new(vec![]),
edition: ExpnId::root().expn_data().edition,
ambiguous_block_expr_parse: Lock::new(FxHashMap::default()),
param_attr_spans: Lock::new(Vec::new()),
let_chains_spans: Lock::new(Vec::new()),
async_closure_spans: Lock::new(Vec::new()),
injected_crate_name: Once::new(),
}
}
#[inline]
pub fn source_map(&self) -> &SourceMap {
&self.source_map
}
pub fn buffer_lint<S: Into<MultiSpan>>(&self,
lint_id: BufferedEarlyLintId,
span: S,
id: NodeId,
msg: &str,
) {
self.buffered_lints.with_lock(|buffered_lints| {
buffered_lints.push(BufferedEarlyLint{
span: span.into(),
id,
msg: msg.into(),
lint_id,
});
});
}
/// Extend an error with a suggestion to wrap an expression with parentheses to allow the
/// parser to continue parsing the following operation as part of the same expression.
pub fn expr_parentheses_needed(
&self,
err: &mut DiagnosticBuilder<'_>,
span: Span,
alt_snippet: Option<String>,
) {
if let Some(snippet) = self.source_map().span_to_snippet(span).ok().or(alt_snippet) {
err.span_suggestion(
span,
"parentheses are required to parse this as an expression",
format!("({})", snippet),
Applicability::MachineApplicable,
);
}
}
}
#[derive(Clone)]
pub struct Directory<'a> {
pub path: Cow<'a, Path>,
pub ownership: DirectoryOwnership,
}
#[derive(Copy, Clone)]
pub enum DirectoryOwnership {
Owned {
// None if `mod.rs`, `Some("foo")` if we're in `foo.rs`
relative: Option<ast::Ident>,
},
UnownedViaBlock,
UnownedViaMod(bool /* legacy warnings? */),
}
// a bunch of utility functions of the form parse_<thing>_from_<source>
// where <thing> includes crate, expr, item, stmt, tts, and one that
// uses a HOF to parse anything, and <source> includes file and
// source_str.
pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'a, ast::Crate> {
let mut parser = new_parser_from_file(sess, input);
parser.parse_crate_mod()
}
pub fn parse_crate_attrs_from_file<'a>(input: &Path, sess: &'a ParseSess)
-> PResult<'a, Vec<ast::Attribute>> {
let mut parser = new_parser_from_file(sess, input);
parser.parse_inner_attributes()
}
pub fn parse_crate_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<'_, ast::Crate> {
new_parser_from_source_str(sess, name, source).parse_crate_mod()
}
pub fn parse_crate_attrs_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<'_, Vec<ast::Attribute>> {
new_parser_from_source_str(sess, name, source).parse_inner_attributes()
}
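// A minimal sketch of driving the string-based entry points above (this is
// internal rustc API; the file name is arbitrary, and `FileName::Custom` is
// assumed available in this `syntax_pos` version):
#[allow(dead_code)]
fn example_parse_from_str(source: String) {
    let sess = ParseSess::new(FilePathMapping::empty());
    match parse_crate_from_source_str(FileName::Custom("example".to_string()), source, &sess) {
        Ok(_krate) => {}
        Err(mut err) => err.emit(),
    }
}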
pub fn parse_stream_from_source_str(
name: FileName,
source: String,
sess: &ParseSess,
override_span: Option<Span>,
) -> TokenStream {
let (stream, mut errors) = source_file_to_stream(
sess,
sess.source_map().new_source_file(name, source),
override_span,
);
emit_unclosed_delims(&mut errors, &sess.span_diagnostic);
stream
}
/// Creates a new parser from a source string.
pub fn new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String) -> Parser<'_> {
panictry_buffer!(&sess.span_diagnostic, maybe_new_parser_from_source_str(sess, name, source))
}
/// Creates a new parser from a source string. Returns any buffered errors from lexing the initial
/// token stream.
pub fn maybe_new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String)
-> Result<Parser<'_>, Vec<Diagnostic>>
{
let mut parser = maybe_source_file_to_parser(sess,
sess.source_map().new_source_file(name, source))?;
parser.recurse_into_file_modules = false;
Ok(parser)
}
/// Creates a new parser, handling errors as appropriate
/// if the file doesn't exist
pub fn new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path) -> Parser<'a> {
source_file_to_parser(sess, file_to_source_file(sess, path, None))
}
/// Creates a new parser, returning buffered diagnostics if the file doesn't
/// exist or from lexing the initial token stream.
pub fn maybe_new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path)
-> Result<Parser<'a>, Vec<Diagnostic>> {
let file = try_file_to_source_file(sess, path, None).map_err(|db| vec![db])?;
maybe_source_file_to_parser(sess, file)
}
/// Given a session, a crate config, a path, and a span, add
/// the file at the given path to the source_map, and return a parser.
/// On an error, use the given span as the source of the problem.
pub fn new_sub_parser_from_file<'a>(sess: &'a ParseSess,
path: &Path,
directory_ownership: DirectoryOwnership,
module_name: Option<String>,
sp: Span) -> Parser<'a> {
let mut p = source_file_to_parser(sess, file_to_source_file(sess, path, Some(sp)));
p.directory.ownership = directory_ownership;
p.root_module_name = module_name;
p
}
/// Given a source_file and config, return a parser
fn source_file_to_parser(sess: &ParseSess, source_file: Lrc<SourceFile>) -> Parser<'_> {
panictry_buffer!(&sess.span_diagnostic,
maybe_source_file_to_parser(sess, source_file))
}
/// Given a source_file and config, return a parser. Returns any buffered errors from lexing the
/// initial token stream.
fn maybe_source_file_to_parser(
sess: &ParseSess,
source_file: Lrc<SourceFile>,
) -> Result<Parser<'_>, Vec<Diagnostic>> {
let end_pos = source_file.end_pos;
let (stream, unclosed_delims) = maybe_file_to_stream(sess, source_file, None)?;
let mut parser = stream_to_parser(sess, stream, None);
parser.unclosed_delims = unclosed_delims;
if parser.token == token::Eof && parser.token.span.is_dummy() {
parser.token.span = Span::new(end_pos, end_pos, parser.token.span.ctxt());
}
Ok(parser)
}
// must preserve old name for now, because quote! from the *existing*
// compiler expands into it
pub fn new_parser_from_tts(sess: &ParseSess, tts: Vec<TokenTree>) -> Parser<'_> {
stream_to_parser(sess, tts.into_iter().collect(), crate::MACRO_ARGUMENTS)
}
// base abstractions
/// Given a session and a path and an optional span (for error reporting),
/// add the path to the session's source_map and return the new source_file or
/// error when a file can't be read.
fn try_file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
-> Result<Lrc<SourceFile>, Diagnostic> {
sess.source_map().load_file(path)
.map_err(|e| {
let msg = format!("couldn't read {}: {}", path.display(), e);
let mut diag = Diagnostic::new(Level::Fatal, &msg);
if let Some(sp) = spanopt {
diag.set_span(sp);
}
diag
})
}
/// Given a session and a path and an optional span (for error reporting),
/// add the path to the session's `source_map` and return the new `source_file`.
fn file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
-> Lrc<SourceFile> {
match try_file_to_source_file(sess, path, spanopt) {
Ok(source_file) => source_file,
Err(d) => {
DiagnosticBuilder::new_diagnostic(&sess.span_diagnostic, d).emit();
FatalError.raise();
}
}
}
/// Given a source_file, produces a sequence of token trees.
pub fn source_file_to_stream(
sess: &ParseSess,
source_file: Lrc<SourceFile>,
override_span: Option<Span>,
) -> (TokenStream, Vec<lexer::UnmatchedBrace>) {
panictry_buffer!(&sess.span_diagnostic, maybe_file_to_stream(sess, source_file, override_span))
}
/// Given a source file, produces a sequence of token trees. Returns any buffered errors from
/// parsing the token stream.
pub fn maybe_file_to_stream(
sess: &ParseSess,
source_file: Lrc<SourceFile>,
override_span: Option<Span>,
) -> Result<(TokenStream, Vec<lexer::UnmatchedBrace>), Vec<Diagnostic>> {
let srdr = lexer::StringReader::new(sess, source_file, override_span);
let (token_trees, unmatched_braces) = srdr.into_token_trees();
match token_trees {
Ok(stream) => Ok((stream, unmatched_braces)),
Err(err) => {
let mut buffer = Vec::with_capacity(1);
err.buffer(&mut buffer);
// Not using `emit_unclosed_delims` to use `db.buffer`
for unmatched in unmatched_braces {
let mut db = sess.span_diagnostic.struct_span_err(unmatched.found_span, &format!(
"incorrect close delimiter: `{}`",
pprust::token_kind_to_string(&token::CloseDelim(unmatched.found_delim)),
));
db.span_label(unmatched.found_span, "incorrect close delimiter");
if let Some(sp) = unmatched.candidate_span {
db.span_label(sp, "close delimiter possibly meant for this");
}
if let Some(sp) = unmatched.unclosed_span {
db.span_label(sp, "un-closed delimiter");
}
db.buffer(&mut buffer);
}
Err(buffer)
}
}
}
/// Given stream and the `ParseSess`, produces a parser.
pub fn stream_to_parser<'a>(
sess: &'a ParseSess,
stream: TokenStream,
subparser_name: Option<&'static str>,
) -> Parser<'a> {
Parser::new(sess, stream, None, true, false, subparser_name)
}
/// Given stream, the `ParseSess` and the base directory, produces a parser.
///
/// Use this function when you are creating a parser from the token stream
/// and also care about the current working directory of the parser (e.g.,
/// you are trying to resolve modules defined inside a macro invocation).
///
/// # Note
///
/// The main usage of this function is outside of rustc, for those who use
/// libsyntax as a library. Please do not remove this function while refactoring
/// just because it is not used in the rustc codebase!
pub fn stream_to_parser_with_base_dir<'a>(
sess: &'a ParseSess,
stream: TokenStream,
base_dir: Directory<'a>,
) -> Parser<'a> {
Parser::new(sess, stream, Some(base_dir), true, false, None)
}
/// A sequence separator.
pub struct SeqSep {
/// The separator token.
pub sep: Option<TokenKind>,
/// `true` if a trailing separator is allowed.
pub trailing_sep_allowed: bool,
}
impl SeqSep {
pub fn trailing_allowed(t: TokenKind) -> SeqSep {
SeqSep {
sep: Some(t),
trailing_sep_allowed: true,
}
}
pub fn none() -> SeqSep {
SeqSep {
sep: None,
trailing_sep_allowed: false,
}
}
}
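// For example, comma-separated lists that tolerate a trailing comma are
// typically parsed with `SeqSep::trailing_allowed(token::Comma)` (assuming the
// `token::Comma` re-export; use the appropriate `TokenKind` variant otherwise).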
| 36.919192 | 100 | 0.64617 |
166db925032af7e8c9e1a0cbd7e853709404ce40 | 1,630 | use crate::common::jormungandr::ConfigurationBuilder;
use crate::common::startup;
use jormungandr_lib::interfaces::{ActiveSlotCoefficient, KESUpdateSpeed};
use jormungandr_testing_utils::testing::{
FragmentGenerator, FragmentSenderSetup, FragmentStatusProvider,
};
pub use jortestkit::console::progress_bar::{parse_progress_bar_mode_from_str, ProgressBarMode};
use jortestkit::load::{self, Configuration, Monitor};
#[test]
pub fn fragment_load_test() {
let mut faucet = startup::create_new_account_address();
let (mut jormungandr, _) = startup::start_stake_pool(
&[faucet.clone()],
&[],
ConfigurationBuilder::new()
.with_slots_per_epoch(60)
.with_consensus_genesis_praos_active_slot_coeff(ActiveSlotCoefficient::MAXIMUM)
.with_slot_duration(4)
.with_epoch_stability_depth(10)
.with_kes_update_speed(KESUpdateSpeed::new(43200).unwrap()),
)
.unwrap();
jormungandr.steal_temp_dir().unwrap().into_persistent();
let configuration = Configuration::duration(
10,
std::time::Duration::from_secs(60),
100,
Monitor::Standard(100),
0,
);
let mut request_generator = FragmentGenerator::new(
FragmentSenderSetup::no_verify(),
jormungandr.to_remote(),
jormungandr.genesis_block_hash(),
jormungandr.fees(),
);
request_generator.fill_from_faucet(&mut faucet);
load::start_async(
request_generator,
FragmentStatusProvider::new(jormungandr.to_remote()),
configuration,
"Wallet backend load test",
);
}
| 31.960784 | 95 | 0.681595 |
e98ac4103b47fc1c42c338ff06153e145f0c092a | 4,105 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use std::collections::HashSet;
use std::sync::Arc;
use common_dal::DataAccessor;
use common_exception::Result;
use crate::sessions::QueryContext;
use crate::storages::fuse::io::snapshot_location;
use crate::storages::fuse::io::SegmentReader;
use crate::storages::fuse::io::SnapshotReader;
use crate::storages::fuse::meta::TableSnapshot;
use crate::storages::fuse::FuseTable;
use crate::storages::fuse::TBL_OPT_KEY_SNAPSHOT_LOC;
use crate::storages::Table;
impl FuseTable {
pub async fn do_optimize(
&self,
ctx: Arc<QueryContext>,
keep_last_snapshot: bool,
) -> Result<()> {
let da = ctx.get_storage_accessor()?;
let tbl_info = self.get_table_info();
let snapshot_loc = tbl_info.meta.options.get(TBL_OPT_KEY_SNAPSHOT_LOC);
let mut snapshots = SnapshotReader::read_snapshot_history(
da.as_ref(),
snapshot_loc,
ctx.get_storage_cache(),
)
.await?;
let min_history_len = if !keep_last_snapshot { 0 } else { 1 };
// short cut
if snapshots.len() <= min_history_len {
return Ok(());
}
let current_segments: HashSet<&String>;
let current_snapshot: TableSnapshot;
if !keep_last_snapshot {
            // if we are not keeping the last snapshot, the gc root set is empty
current_segments = HashSet::new();
} else {
current_snapshot = snapshots.remove(0);
current_segments = HashSet::from_iter(¤t_snapshot.segments);
}
let prevs = snapshots.iter().fold(HashSet::new(), |mut acc, s| {
acc.extend(&s.segments);
acc
});
// segments which no longer need to be kept
let seg_delta = prevs.difference(¤t_segments).collect::<Vec<_>>();
// blocks to be removed
let prev_blocks: HashSet<String> = self
.blocks_of(da.clone(), seg_delta.iter(), ctx.clone())
.await?;
let current_blocks: HashSet<String> = self
.blocks_of(da.clone(), current_segments.iter(), ctx.clone())
.await?;
let block_delta = prev_blocks.difference(¤t_blocks);
// NOTE: the following actions are NOT transactional yet
// 1. remove blocks
for x in block_delta {
self.remove_location(da.clone(), x).await?;
}
// 2. remove the segments
for x in seg_delta {
self.remove_location(da.clone(), x).await?;
}
// 3. remove the snapshots
for x in snapshots.iter().rev() {
let loc = snapshot_location(&x.snapshot_id);
self.remove_location(da.clone(), loc).await?
}
Ok(())
}
async fn blocks_of(
&self,
data_accessor: Arc<dyn DataAccessor>,
locations: impl Iterator<Item = impl AsRef<str>>,
ctx: Arc<QueryContext>,
) -> Result<HashSet<String>> {
let mut result = HashSet::new();
for x in locations {
let res =
SegmentReader::read(data_accessor.as_ref(), x, ctx.get_storage_cache()).await?;
for block_meta in res.blocks {
result.insert(block_meta.location.path);
}
}
Ok(result)
}
async fn remove_location(
&self,
data_accessor: Arc<dyn DataAccessor>,
location: impl AsRef<str>,
) -> Result<()> {
data_accessor.remove(location.as_ref()).await
}
}
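// The pruning above is plain set arithmetic; a self-contained illustration of
// the `difference` step (segment names are hypothetical):
#[cfg(test)]
mod gc_set_arithmetic_example {
    use std::collections::HashSet;

    #[test]
    fn difference_keeps_only_unreferenced_items() {
        let previous: HashSet<&str> = vec!["seg-1", "seg-2", "seg-3"].into_iter().collect();
        let current: HashSet<&str> = vec!["seg-2"].into_iter().collect();
        let mut to_remove: Vec<&str> = previous.difference(&current).copied().collect();
        to_remove.sort_unstable();
        // Only segments no longer referenced by the current snapshot are removed.
        assert_eq!(to_remove, vec!["seg-1", "seg-3"]);
    }
}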
| 32.322835 | 95 | 0.609501 |
ab106202db00968a782b9a38e7a734eca6edfa69 | 3,396 | #![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(rskernel::test_runner)]
#![reexport_test_harness_main = "test_main"]
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
use rskernel::{
println,
task::{executor::Executor, keyboard, simple_executor::SimpleExecutor, Task},
};
use x86_64::{structures::paging::PageTable, VirtAddr};
entry_point!(kernel_main);
fn kernel_main(boot_info: &'static BootInfo) -> ! {
use rskernel::allocator;
use rskernel::memory::{self, BootInfoFrameAllocator};
use x86_64::structures::paging::{Page, Translate};
println!("Hello World{}", "!");
rskernel::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
// map an unused page
let page = Page::containing_address(VirtAddr::new(0));
memory::create_example_mapping(page, &mut mapper, &mut frame_allocator);
// write the string `New!` to the screen through the new mapping
let page_ptr: *mut u64 = page.start_address().as_mut_ptr();
unsafe { page_ptr.offset(400).write_volatile(0x_f021_f077_f065_f04e) };
let addresses = [
// the identity-mapped vga buffer page
0xb8000,
// some code page
0x201008,
// some stack page
0x0100_0020_1a10,
// virtual address mapped to physical address 0
boot_info.physical_memory_offset,
];
for &address in &addresses {
let virt = VirtAddr::new(address);
let phys = mapper.translate_addr(virt);
println!("{:?} -> {:?}", virt, phys);
}
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
println!("heap_value at {:p}", heap_value);
// create a dynamically sized vector
let mut vec = Vec::new();
for i in 0..500 {
vec.push(i);
}
println!("vec at {:p}", vec.as_slice());
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
// invoke a breakpoint exception
x86_64::instructions::interrupts::int3();
let mut executor = Executor::new();
executor.spawn(Task::new(example_task()));
executor.spawn(Task::new(keyboard::print_keypresses()));
executor.run();
#[cfg(test)]
test_main();
println!("It did not crash!");
rskernel::hlt_loop();
}
async fn async_number() -> u32 {
42
}
async fn example_task() {
let number = async_number().await;
println!("async number: {}", number);
}
/// This function is called on panic.
#[cfg(not(test))]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
println!("{}", info);
rskernel::hlt_loop();
}
#[cfg(test)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
rskernel::test_panic_handler(info)
}
| 28.537815 | 97 | 0.649293 |
dbac90382d1d7477d421ca2824026cb4778e415f | 275 | // aux-build:make-macro.rs
// aux-build:meta-macro.rs
// edition:2018
// compile-flags: -Z span-debug
// run-pass
#![no_std] // Don't load unnecessary hygiene information from std
extern crate std;
extern crate meta_macro;
fn main() {
meta_macro::print_def_site!();
}
| 18.333333 | 65 | 0.701818 |
4a1baa70dea06a5d92fc8998d2139f66780b73c2 | 2,083 | #[macro_use]
extern crate chan;
use std::cmp::max;
use std::collections::HashMap;
use std::process::Command;
use dotproperties::parse_from_file;
use regex::Regex;
use sysinfo::{Pid, ProcessExt, RefreshKind, System, SystemExt};
fn main() {
let config: HashMap<_, _> = parse_from_file("jboss-sentinel.properties")
.map(|c| c.into_iter().collect())
.expect("Missing jboss-sentinel.properties file.");
let interval = config.get("interval")
.and_then(|v| str::parse::<u32>(v).ok())
.map(|v| max(v, 1))
.unwrap_or(10);
let command = config.get("command")
.expect("Missing 'command' property.");
println!("Starting JBoss Sentinel");
println!(" - watch interval (seconds): {:?}", interval);
println!(" - command: {:?}", command);
let mut system = System::new_with_specifics(RefreshKind::new().with_processes());
let name_pattern = Regex::new("jboss\\.home\\.dir").unwrap();
let timer = chan::tick_ms(interval * 1000);
let mut pid: Option<Pid> = None;
loop {
chan_select! {
timer.recv() => match check_server(&mut system, &name_pattern) {
Some(p) if pid.filter(|v| &p == v).is_some() => {},
Some(p) => {
pid = Some(p);
println!("JBoss process found: {:?}", p)
},
None => {
println!("No JBoss process found.");
                    match Command::new("cmd").arg("/C").arg("start").arg("cmd").arg("/C").arg(command).spawn() {
Ok(_) => println!("Spawned new server instance: {:?}", command),
Err(error) => panic!("Failed to restart the server: {:?}", error)
};
},
},
}
}
}
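// Scans the process table for a JVM whose command line mentions
// `jboss.home.dir` and returns its pid, if any.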
fn check_server(system: &mut System, name_pattern: &Regex) -> Option<Pid> {
system.refresh_processes();
system.get_processes().values()
        .find(|p| p.cmd().iter().any(|a| name_pattern.find(a.as_str()).is_some()))
.map(|p| p.pid())
}
| 32.046154 | 113 | 0.538166 |
f9760d31613f674250b63698f87ae78c17094a4b | 24,653 | //! `src/op.rs` is automatically generated by `./utils/gen.py` from `utils/opcodes.txt`.
//! Don't modify this file directly; instead run `python3 ./utils/gen.py`.
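//!
//! As a quick illustration of the generated API: `RawOpcode::from(0xc3u8)`
//! decodes to `RawOpcode::JMP`, whose `size()` is 3 (the opcode byte plus a
//! 16-bit address), and whose `Display` output is `JMP(0xc3)`.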
use std::{fmt, mem};
#[allow(non_camel_case_types)]
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RawOpcode {
NOP = 0x00,
LXI_B = 0x01,
STAX_B = 0x02,
INX_B = 0x03,
INR_B = 0x04,
DCR_B = 0x05,
MVI_B = 0x06,
RLC = 0x07,
DAD_B = 0x09,
LDAX_B = 0x0a,
DCX_B = 0x0b,
INR_C = 0x0c,
DCR_C = 0x0d,
MVI_C = 0x0e,
RRC = 0x0f,
LXI_D = 0x11,
STAX_D = 0x12,
INX_D = 0x13,
INR_D = 0x14,
DCR_D = 0x15,
MVI_D = 0x16,
RAL = 0x17,
DAD_D = 0x19,
LDAX_D = 0x1a,
DCX_D = 0x1b,
INR_E = 0x1c,
DCR_E = 0x1d,
MVI_E = 0x1e,
RAR = 0x1f,
LXI_H = 0x21,
SHLD = 0x22,
INX_H = 0x23,
INR_H = 0x24,
DCR_H = 0x25,
MVI_H = 0x26,
DAA = 0x27,
DAD_H = 0x29,
LHLD = 0x2a,
DCX_H = 0x2b,
INR_L = 0x2c,
DCR_L = 0x2d,
MVI_L = 0x2e,
CMA = 0x2f,
LXI_SP = 0x31,
STA = 0x32,
INX_SP = 0x33,
INR_M = 0x34,
DCR_M = 0x35,
MVI_M = 0x36,
STC = 0x37,
DAD_SP = 0x39,
LDA = 0x3a,
DCX_SP = 0x3b,
INR_A = 0x3c,
DCR_A = 0x3d,
MVI_A = 0x3e,
CMC = 0x3f,
MOV_B_B = 0x40,
MOV_B_C = 0x41,
MOV_B_D = 0x42,
MOV_B_E = 0x43,
MOV_B_H = 0x44,
MOV_B_L = 0x45,
MOV_B_M = 0x46,
MOV_B_A = 0x47,
MOV_C_B = 0x48,
MOV_C_C = 0x49,
MOV_C_D = 0x4a,
MOV_C_E = 0x4b,
MOV_C_H = 0x4c,
MOV_C_L = 0x4d,
MOV_C_M = 0x4e,
MOV_C_A = 0x4f,
MOV_D_B = 0x50,
MOV_D_C = 0x51,
MOV_D_D = 0x52,
MOV_D_E = 0x53,
MOV_D_H = 0x54,
MOV_D_L = 0x55,
MOV_D_M = 0x56,
MOV_D_A = 0x57,
MOV_E_B = 0x58,
MOV_E_C = 0x59,
MOV_E_D = 0x5a,
MOV_E_E = 0x5b,
MOV_E_H = 0x5c,
MOV_E_L = 0x5d,
MOV_E_M = 0x5e,
MOV_E_A = 0x5f,
MOV_H_B = 0x60,
MOV_H_C = 0x61,
MOV_H_D = 0x62,
MOV_H_E = 0x63,
MOV_H_H = 0x64,
MOV_H_L = 0x65,
MOV_H_M = 0x66,
MOV_H_A = 0x67,
MOV_L_B = 0x68,
MOV_L_C = 0x69,
MOV_L_D = 0x6a,
MOV_L_E = 0x6b,
MOV_L_H = 0x6c,
MOV_L_L = 0x6d,
MOV_L_M = 0x6e,
MOV_L_A = 0x6f,
MOV_M_B = 0x70,
MOV_M_C = 0x71,
MOV_M_D = 0x72,
MOV_M_E = 0x73,
MOV_M_H = 0x74,
MOV_M_L = 0x75,
HLT = 0x76,
MOV_M_A = 0x77,
MOV_A_B = 0x78,
MOV_A_C = 0x79,
MOV_A_D = 0x7a,
MOV_A_E = 0x7b,
MOV_A_H = 0x7c,
MOV_A_L = 0x7d,
MOV_A_M = 0x7e,
MOV_A_A = 0x7f,
ADD_B = 0x80,
ADD_C = 0x81,
ADD_D = 0x82,
ADD_E = 0x83,
ADD_H = 0x84,
ADD_L = 0x85,
ADD_M = 0x86,
ADD_A = 0x87,
ADC_B = 0x88,
ADC_C = 0x89,
ADC_D = 0x8a,
ADC_E = 0x8b,
ADC_H = 0x8c,
ADC_L = 0x8d,
ADC_M = 0x8e,
ADC_A = 0x8f,
SUB_B = 0x90,
SUB_C = 0x91,
SUB_D = 0x92,
SUB_E = 0x93,
SUB_H = 0x94,
SUB_L = 0x95,
SUB_M = 0x96,
SUB_A = 0x97,
SBB_B = 0x98,
SBB_C = 0x99,
SBB_D = 0x9a,
SBB_E = 0x9b,
SBB_H = 0x9c,
SBB_L = 0x9d,
SBB_M = 0x9e,
SBB_A = 0x9f,
ANA_B = 0xa0,
ANA_C = 0xa1,
ANA_D = 0xa2,
ANA_E = 0xa3,
ANA_H = 0xa4,
ANA_L = 0xa5,
ANA_M = 0xa6,
ANA_A = 0xa7,
XRA_B = 0xa8,
XRA_C = 0xa9,
XRA_D = 0xaa,
XRA_E = 0xab,
XRA_H = 0xac,
XRA_L = 0xad,
XRA_M = 0xae,
XRA_A = 0xaf,
ORA_B = 0xb0,
ORA_C = 0xb1,
ORA_D = 0xb2,
ORA_E = 0xb3,
ORA_H = 0xb4,
ORA_L = 0xb5,
ORA_M = 0xb6,
ORA_A = 0xb7,
CMP_B = 0xb8,
CMP_C = 0xb9,
CMP_D = 0xba,
CMP_E = 0xbb,
CMP_H = 0xbc,
CMP_L = 0xbd,
CMP_M = 0xbe,
CMP_A = 0xbf,
RNZ = 0xc0,
POP_B = 0xc1,
JNZ = 0xc2,
JMP = 0xc3,
CNZ = 0xc4,
PUSH_B = 0xc5,
ADI = 0xc6,
RST_0 = 0xc7,
RZ = 0xc8,
RET = 0xc9,
JZ = 0xca,
CZ = 0xcc,
CALL = 0xcd,
ACI = 0xce,
RST_1 = 0xcf,
RNC = 0xd0,
POP_D = 0xd1,
JNC = 0xd2,
OUT = 0xd3,
CNC = 0xd4,
PUSH_D = 0xd5,
SUI = 0xd6,
RST_2 = 0xd7,
RC = 0xd8,
JC = 0xda,
IN = 0xdb,
CC = 0xdc,
SBI = 0xde,
RST_3 = 0xdf,
RPO = 0xe0,
POP_H = 0xe1,
JPO = 0xe2,
XTHL = 0xe3,
CPO = 0xe4,
PUSH_H = 0xe5,
ANI = 0xe6,
RST_4 = 0xe7,
RPE = 0xe8,
PCHL = 0xe9,
JPE = 0xea,
XCHG = 0xeb,
CPE = 0xec,
XRI = 0xee,
RST_5 = 0xef,
RP = 0xf0,
POP_PSW = 0xf1,
JP = 0xf2,
DI = 0xf3,
CP = 0xf4,
PUSH_PSW = 0xf5,
ORI = 0xf6,
RST_6 = 0xf7,
RM = 0xf8,
SPHL = 0xf9,
JM = 0xfa,
EI = 0xfb,
CM = 0xfc,
CPI = 0xfe,
RST_7 = 0xff,
}
impl RawOpcode {
pub fn size(&self) -> usize {
match *self {
RawOpcode::NOP => 1,
RawOpcode::LXI_B => 3,
RawOpcode::STAX_B => 1,
RawOpcode::INX_B => 1,
RawOpcode::INR_B => 1,
RawOpcode::DCR_B => 1,
RawOpcode::MVI_B => 2,
RawOpcode::RLC => 1,
RawOpcode::DAD_B => 1,
RawOpcode::LDAX_B => 1,
RawOpcode::DCX_B => 1,
RawOpcode::INR_C => 1,
RawOpcode::DCR_C => 1,
RawOpcode::MVI_C => 2,
RawOpcode::RRC => 1,
RawOpcode::LXI_D => 3,
RawOpcode::STAX_D => 1,
RawOpcode::INX_D => 1,
RawOpcode::INR_D => 1,
RawOpcode::DCR_D => 1,
RawOpcode::MVI_D => 2,
RawOpcode::RAL => 1,
RawOpcode::DAD_D => 1,
RawOpcode::LDAX_D => 1,
RawOpcode::DCX_D => 1,
RawOpcode::INR_E => 1,
RawOpcode::DCR_E => 1,
RawOpcode::MVI_E => 2,
RawOpcode::RAR => 1,
RawOpcode::LXI_H => 3,
RawOpcode::SHLD => 3,
RawOpcode::INX_H => 1,
RawOpcode::INR_H => 1,
RawOpcode::DCR_H => 1,
RawOpcode::MVI_H => 2,
RawOpcode::DAA => 1,
RawOpcode::DAD_H => 1,
RawOpcode::LHLD => 3,
RawOpcode::DCX_H => 1,
RawOpcode::INR_L => 1,
RawOpcode::DCR_L => 1,
RawOpcode::MVI_L => 2,
RawOpcode::CMA => 1,
RawOpcode::LXI_SP => 3,
RawOpcode::STA => 3,
RawOpcode::INX_SP => 1,
RawOpcode::INR_M => 1,
RawOpcode::DCR_M => 1,
RawOpcode::MVI_M => 2,
RawOpcode::STC => 1,
RawOpcode::DAD_SP => 1,
RawOpcode::LDA => 3,
RawOpcode::DCX_SP => 1,
RawOpcode::INR_A => 1,
RawOpcode::DCR_A => 1,
RawOpcode::MVI_A => 2,
RawOpcode::CMC => 1,
RawOpcode::MOV_B_B => 1,
RawOpcode::MOV_B_C => 1,
RawOpcode::MOV_B_D => 1,
RawOpcode::MOV_B_E => 1,
RawOpcode::MOV_B_H => 1,
RawOpcode::MOV_B_L => 1,
RawOpcode::MOV_B_M => 1,
RawOpcode::MOV_B_A => 1,
RawOpcode::MOV_C_B => 1,
RawOpcode::MOV_C_C => 1,
RawOpcode::MOV_C_D => 1,
RawOpcode::MOV_C_E => 1,
RawOpcode::MOV_C_H => 1,
RawOpcode::MOV_C_L => 1,
RawOpcode::MOV_C_M => 1,
RawOpcode::MOV_C_A => 1,
RawOpcode::MOV_D_B => 1,
RawOpcode::MOV_D_C => 1,
RawOpcode::MOV_D_D => 1,
RawOpcode::MOV_D_E => 1,
RawOpcode::MOV_D_H => 1,
RawOpcode::MOV_D_L => 1,
RawOpcode::MOV_D_M => 1,
RawOpcode::MOV_D_A => 1,
RawOpcode::MOV_E_B => 1,
RawOpcode::MOV_E_C => 1,
RawOpcode::MOV_E_D => 1,
RawOpcode::MOV_E_E => 1,
RawOpcode::MOV_E_H => 1,
RawOpcode::MOV_E_L => 1,
RawOpcode::MOV_E_M => 1,
RawOpcode::MOV_E_A => 1,
RawOpcode::MOV_H_B => 1,
RawOpcode::MOV_H_C => 1,
RawOpcode::MOV_H_D => 1,
RawOpcode::MOV_H_E => 1,
RawOpcode::MOV_H_H => 1,
RawOpcode::MOV_H_L => 1,
RawOpcode::MOV_H_M => 1,
RawOpcode::MOV_H_A => 1,
RawOpcode::MOV_L_B => 1,
RawOpcode::MOV_L_C => 1,
RawOpcode::MOV_L_D => 1,
RawOpcode::MOV_L_E => 1,
RawOpcode::MOV_L_H => 1,
RawOpcode::MOV_L_L => 1,
RawOpcode::MOV_L_M => 1,
RawOpcode::MOV_L_A => 1,
RawOpcode::MOV_M_B => 1,
RawOpcode::MOV_M_C => 1,
RawOpcode::MOV_M_D => 1,
RawOpcode::MOV_M_E => 1,
RawOpcode::MOV_M_H => 1,
RawOpcode::MOV_M_L => 1,
RawOpcode::HLT => 1,
RawOpcode::MOV_M_A => 1,
RawOpcode::MOV_A_B => 1,
RawOpcode::MOV_A_C => 1,
RawOpcode::MOV_A_D => 1,
RawOpcode::MOV_A_E => 1,
RawOpcode::MOV_A_H => 1,
RawOpcode::MOV_A_L => 1,
RawOpcode::MOV_A_M => 1,
RawOpcode::MOV_A_A => 1,
RawOpcode::ADD_B => 1,
RawOpcode::ADD_C => 1,
RawOpcode::ADD_D => 1,
RawOpcode::ADD_E => 1,
RawOpcode::ADD_H => 1,
RawOpcode::ADD_L => 1,
RawOpcode::ADD_M => 1,
RawOpcode::ADD_A => 1,
RawOpcode::ADC_B => 1,
RawOpcode::ADC_C => 1,
RawOpcode::ADC_D => 1,
RawOpcode::ADC_E => 1,
RawOpcode::ADC_H => 1,
RawOpcode::ADC_L => 1,
RawOpcode::ADC_M => 1,
RawOpcode::ADC_A => 1,
RawOpcode::SUB_B => 1,
RawOpcode::SUB_C => 1,
RawOpcode::SUB_D => 1,
RawOpcode::SUB_E => 1,
RawOpcode::SUB_H => 1,
RawOpcode::SUB_L => 1,
RawOpcode::SUB_M => 1,
RawOpcode::SUB_A => 1,
RawOpcode::SBB_B => 1,
RawOpcode::SBB_C => 1,
RawOpcode::SBB_D => 1,
RawOpcode::SBB_E => 1,
RawOpcode::SBB_H => 1,
RawOpcode::SBB_L => 1,
RawOpcode::SBB_M => 1,
RawOpcode::SBB_A => 1,
RawOpcode::ANA_B => 1,
RawOpcode::ANA_C => 1,
RawOpcode::ANA_D => 1,
RawOpcode::ANA_E => 1,
RawOpcode::ANA_H => 1,
RawOpcode::ANA_L => 1,
RawOpcode::ANA_M => 1,
RawOpcode::ANA_A => 1,
RawOpcode::XRA_B => 1,
RawOpcode::XRA_C => 1,
RawOpcode::XRA_D => 1,
RawOpcode::XRA_E => 1,
RawOpcode::XRA_H => 1,
RawOpcode::XRA_L => 1,
RawOpcode::XRA_M => 1,
RawOpcode::XRA_A => 1,
RawOpcode::ORA_B => 1,
RawOpcode::ORA_C => 1,
RawOpcode::ORA_D => 1,
RawOpcode::ORA_E => 1,
RawOpcode::ORA_H => 1,
RawOpcode::ORA_L => 1,
RawOpcode::ORA_M => 1,
RawOpcode::ORA_A => 1,
RawOpcode::CMP_B => 1,
RawOpcode::CMP_C => 1,
RawOpcode::CMP_D => 1,
RawOpcode::CMP_E => 1,
RawOpcode::CMP_H => 1,
RawOpcode::CMP_L => 1,
RawOpcode::CMP_M => 1,
RawOpcode::CMP_A => 1,
RawOpcode::RNZ => 1,
RawOpcode::POP_B => 1,
RawOpcode::JNZ => 3,
RawOpcode::JMP => 3,
RawOpcode::CNZ => 3,
RawOpcode::PUSH_B => 1,
RawOpcode::ADI => 2,
RawOpcode::RST_0 => 1,
RawOpcode::RZ => 1,
RawOpcode::RET => 1,
RawOpcode::JZ => 3,
RawOpcode::CZ => 3,
RawOpcode::CALL => 3,
RawOpcode::ACI => 2,
RawOpcode::RST_1 => 1,
RawOpcode::RNC => 1,
RawOpcode::POP_D => 1,
RawOpcode::JNC => 3,
RawOpcode::OUT => 2,
RawOpcode::CNC => 3,
RawOpcode::PUSH_D => 1,
RawOpcode::SUI => 2,
RawOpcode::RST_2 => 1,
RawOpcode::RC => 1,
RawOpcode::JC => 3,
RawOpcode::IN => 2,
RawOpcode::CC => 3,
RawOpcode::SBI => 2,
RawOpcode::RST_3 => 1,
RawOpcode::RPO => 1,
RawOpcode::POP_H => 1,
RawOpcode::JPO => 3,
RawOpcode::XTHL => 1,
RawOpcode::CPO => 3,
RawOpcode::PUSH_H => 1,
RawOpcode::ANI => 2,
RawOpcode::RST_4 => 1,
RawOpcode::RPE => 1,
RawOpcode::PCHL => 1,
RawOpcode::JPE => 3,
RawOpcode::XCHG => 1,
RawOpcode::CPE => 3,
RawOpcode::XRI => 2,
RawOpcode::RST_5 => 1,
RawOpcode::RP => 1,
RawOpcode::POP_PSW => 1,
RawOpcode::JP => 3,
RawOpcode::DI => 1,
RawOpcode::CP => 3,
RawOpcode::PUSH_PSW => 1,
RawOpcode::ORI => 2,
RawOpcode::RST_6 => 1,
RawOpcode::RM => 1,
RawOpcode::SPHL => 1,
RawOpcode::JM => 3,
RawOpcode::EI => 1,
RawOpcode::CM => 3,
RawOpcode::CPI => 2,
RawOpcode::RST_7 => 1,
}
}
}
impl From<u8> for RawOpcode {
fn from(t: u8) -> RawOpcode {
match t {
// Undocumented ops
0x08 | 0x10 | 0x18 | 0x20 | 0x28 | 0x30 | 0x38 => RawOpcode::NOP,
0xd9 => RawOpcode::RET,
0xdd | 0xed | 0xfd => RawOpcode::CALL,
0xcb => RawOpcode::JMP,
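            // SAFETY: every byte value not matched above corresponds to a
            // declared variant, so the transmute cannot yield an invalid enum.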
_ => unsafe { mem::transmute(t) },
}
}
}
impl From<&u8> for RawOpcode {
fn from(t: &u8) -> RawOpcode {
From::from(*t)
}
}
impl Into<u8> for RawOpcode {
fn into(self) -> u8 {
unsafe { mem::transmute(self) }
}
}
impl fmt::Display for RawOpcode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}(0x{:02x?})", self, *self as u8)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Opcode {
Nop,
LxiB(u8, u8),
StaxB,
InxB,
InrB,
DcrB,
MviB(u8),
Rlc,
DadB,
LdaxB,
DcxB,
InrC,
DcrC,
MviC(u8),
Rrc,
LxiD(u8, u8),
StaxD,
InxD,
InrD,
DcrD,
MviD(u8),
Ral,
DadD,
LdaxD,
DcxD,
InrE,
DcrE,
MviE(u8),
Rar,
LxiH(u8, u8),
Shld(u16),
InxH,
InrH,
DcrH,
MviH(u8),
Daa,
DadH,
Lhld(u16),
DcxH,
InrL,
DcrL,
MviL(u8),
Cma,
LxiSp(u8, u8),
Sta(u16),
InxSp,
InrM,
DcrM,
MviM(u8),
Stc,
DadSp,
Lda(u16),
DcxSp,
InrA,
DcrA,
MviA(u8),
Cmc,
MovBB,
MovBC,
MovBD,
MovBE,
MovBH,
MovBL,
MovBM,
MovBA,
MovCB,
MovCC,
MovCD,
MovCE,
MovCH,
MovCL,
MovCM,
MovCA,
MovDB,
MovDC,
MovDD,
MovDE,
MovDH,
MovDL,
MovDM,
MovDA,
MovEB,
MovEC,
MovED,
MovEE,
MovEH,
MovEL,
MovEM,
MovEA,
MovHB,
MovHC,
MovHD,
MovHE,
MovHH,
MovHL,
MovHM,
MovHA,
MovLB,
MovLC,
MovLD,
MovLE,
MovLH,
MovLL,
MovLM,
MovLA,
MovMB,
MovMC,
MovMD,
MovME,
MovMH,
MovML,
Hlt,
MovMA,
MovAB,
MovAC,
MovAD,
MovAE,
MovAH,
MovAL,
MovAM,
MovAA,
AddB,
AddC,
AddD,
AddE,
AddH,
AddL,
AddM,
AddA,
AdcB,
AdcC,
AdcD,
AdcE,
AdcH,
AdcL,
AdcM,
AdcA,
SubB,
SubC,
SubD,
SubE,
SubH,
SubL,
SubM,
SubA,
SbbB,
SbbC,
SbbD,
SbbE,
SbbH,
SbbL,
SbbM,
SbbA,
AnaB,
AnaC,
AnaD,
AnaE,
AnaH,
AnaL,
AnaM,
AnaA,
XraB,
XraC,
XraD,
XraE,
XraH,
XraL,
XraM,
XraA,
OraB,
OraC,
OraD,
OraE,
OraH,
OraL,
OraM,
OraA,
CmpB,
CmpC,
CmpD,
CmpE,
CmpH,
CmpL,
CmpM,
CmpA,
Rnz,
PopB,
Jnz(u16),
Jmp(u16),
Cnz(u16),
PushB,
Adi(u8),
Rst0,
Rz,
Ret,
Jz(u16),
Cz(u16),
Call(u16),
Aci(u8),
Rst1,
Rnc,
PopD,
Jnc(u16),
Out(u8),
Cnc(u16),
PushD,
Sui(u8),
Rst2,
Rc,
Jc(u16),
In(u8),
Cc(u16),
Sbi(u8),
Rst3,
Rpo,
PopH,
Jpo(u16),
Xthl,
Cpo(u16),
PushH,
Ani(u8),
Rst4,
Rpe,
Pchl,
Jpe(u16),
Xchg,
Cpe(u16),
Xri(u8),
Rst5,
Rp,
PopPsw,
Jp(u16),
Di,
Cp(u16),
PushPsw,
Ori(u8),
Rst6,
Rm,
Sphl,
Jm(u16),
Ei,
Cm(u16),
Cpi(u8),
Rst7,
}
impl Opcode {
pub fn size(&self) -> usize {
match *self {
Opcode::Nop => 1,
Opcode::LxiB(_, _) => 3,
Opcode::StaxB => 1,
Opcode::InxB => 1,
Opcode::InrB => 1,
Opcode::DcrB => 1,
Opcode::MviB(_) => 2,
Opcode::Rlc => 1,
Opcode::DadB => 1,
Opcode::LdaxB => 1,
Opcode::DcxB => 1,
Opcode::InrC => 1,
Opcode::DcrC => 1,
Opcode::MviC(_) => 2,
Opcode::Rrc => 1,
Opcode::LxiD(_, _) => 3,
Opcode::StaxD => 1,
Opcode::InxD => 1,
Opcode::InrD => 1,
Opcode::DcrD => 1,
Opcode::MviD(_) => 2,
Opcode::Ral => 1,
Opcode::DadD => 1,
Opcode::LdaxD => 1,
Opcode::DcxD => 1,
Opcode::InrE => 1,
Opcode::DcrE => 1,
Opcode::MviE(_) => 2,
Opcode::Rar => 1,
Opcode::LxiH(_, _) => 3,
Opcode::Shld(_) => 3,
Opcode::InxH => 1,
Opcode::InrH => 1,
Opcode::DcrH => 1,
Opcode::MviH(_) => 2,
Opcode::Daa => 1,
Opcode::DadH => 1,
Opcode::Lhld(_) => 3,
Opcode::DcxH => 1,
Opcode::InrL => 1,
Opcode::DcrL => 1,
Opcode::MviL(_) => 2,
Opcode::Cma => 1,
Opcode::LxiSp(_, _) => 3,
Opcode::Sta(_) => 3,
Opcode::InxSp => 1,
Opcode::InrM => 1,
Opcode::DcrM => 1,
Opcode::MviM(_) => 2,
Opcode::Stc => 1,
Opcode::DadSp => 1,
Opcode::Lda(_) => 3,
Opcode::DcxSp => 1,
Opcode::InrA => 1,
Opcode::DcrA => 1,
Opcode::MviA(_) => 2,
Opcode::Cmc => 1,
Opcode::MovBB => 1,
Opcode::MovBC => 1,
Opcode::MovBD => 1,
Opcode::MovBE => 1,
Opcode::MovBH => 1,
Opcode::MovBL => 1,
Opcode::MovBM => 1,
Opcode::MovBA => 1,
Opcode::MovCB => 1,
Opcode::MovCC => 1,
Opcode::MovCD => 1,
Opcode::MovCE => 1,
Opcode::MovCH => 1,
Opcode::MovCL => 1,
Opcode::MovCM => 1,
Opcode::MovCA => 1,
Opcode::MovDB => 1,
Opcode::MovDC => 1,
Opcode::MovDD => 1,
Opcode::MovDE => 1,
Opcode::MovDH => 1,
Opcode::MovDL => 1,
Opcode::MovDM => 1,
Opcode::MovDA => 1,
Opcode::MovEB => 1,
Opcode::MovEC => 1,
Opcode::MovED => 1,
Opcode::MovEE => 1,
Opcode::MovEH => 1,
Opcode::MovEL => 1,
Opcode::MovEM => 1,
Opcode::MovEA => 1,
Opcode::MovHB => 1,
Opcode::MovHC => 1,
Opcode::MovHD => 1,
Opcode::MovHE => 1,
Opcode::MovHH => 1,
Opcode::MovHL => 1,
Opcode::MovHM => 1,
Opcode::MovHA => 1,
Opcode::MovLB => 1,
Opcode::MovLC => 1,
Opcode::MovLD => 1,
Opcode::MovLE => 1,
Opcode::MovLH => 1,
Opcode::MovLL => 1,
Opcode::MovLM => 1,
Opcode::MovLA => 1,
Opcode::MovMB => 1,
Opcode::MovMC => 1,
Opcode::MovMD => 1,
Opcode::MovME => 1,
Opcode::MovMH => 1,
Opcode::MovML => 1,
Opcode::Hlt => 1,
Opcode::MovMA => 1,
Opcode::MovAB => 1,
Opcode::MovAC => 1,
Opcode::MovAD => 1,
Opcode::MovAE => 1,
Opcode::MovAH => 1,
Opcode::MovAL => 1,
Opcode::MovAM => 1,
Opcode::MovAA => 1,
Opcode::AddB => 1,
Opcode::AddC => 1,
Opcode::AddD => 1,
Opcode::AddE => 1,
Opcode::AddH => 1,
Opcode::AddL => 1,
Opcode::AddM => 1,
Opcode::AddA => 1,
Opcode::AdcB => 1,
Opcode::AdcC => 1,
Opcode::AdcD => 1,
Opcode::AdcE => 1,
Opcode::AdcH => 1,
Opcode::AdcL => 1,
Opcode::AdcM => 1,
Opcode::AdcA => 1,
Opcode::SubB => 1,
Opcode::SubC => 1,
Opcode::SubD => 1,
Opcode::SubE => 1,
Opcode::SubH => 1,
Opcode::SubL => 1,
Opcode::SubM => 1,
Opcode::SubA => 1,
Opcode::SbbB => 1,
Opcode::SbbC => 1,
Opcode::SbbD => 1,
Opcode::SbbE => 1,
Opcode::SbbH => 1,
Opcode::SbbL => 1,
Opcode::SbbM => 1,
Opcode::SbbA => 1,
Opcode::AnaB => 1,
Opcode::AnaC => 1,
Opcode::AnaD => 1,
Opcode::AnaE => 1,
Opcode::AnaH => 1,
Opcode::AnaL => 1,
Opcode::AnaM => 1,
Opcode::AnaA => 1,
Opcode::XraB => 1,
Opcode::XraC => 1,
Opcode::XraD => 1,
Opcode::XraE => 1,
Opcode::XraH => 1,
Opcode::XraL => 1,
Opcode::XraM => 1,
Opcode::XraA => 1,
Opcode::OraB => 1,
Opcode::OraC => 1,
Opcode::OraD => 1,
Opcode::OraE => 1,
Opcode::OraH => 1,
Opcode::OraL => 1,
Opcode::OraM => 1,
Opcode::OraA => 1,
Opcode::CmpB => 1,
Opcode::CmpC => 1,
Opcode::CmpD => 1,
Opcode::CmpE => 1,
Opcode::CmpH => 1,
Opcode::CmpL => 1,
Opcode::CmpM => 1,
Opcode::CmpA => 1,
Opcode::Rnz => 1,
Opcode::PopB => 1,
Opcode::Jnz(_) => 3,
Opcode::Jmp(_) => 3,
Opcode::Cnz(_) => 3,
Opcode::PushB => 1,
Opcode::Adi(_) => 2,
Opcode::Rst0 => 1,
Opcode::Rz => 1,
Opcode::Ret => 1,
Opcode::Jz(_) => 3,
Opcode::Cz(_) => 3,
Opcode::Call(_) => 3,
Opcode::Aci(_) => 2,
Opcode::Rst1 => 1,
Opcode::Rnc => 1,
Opcode::PopD => 1,
Opcode::Jnc(_) => 3,
Opcode::Out(_) => 2,
Opcode::Cnc(_) => 3,
Opcode::PushD => 1,
Opcode::Sui(_) => 2,
Opcode::Rst2 => 1,
Opcode::Rc => 1,
Opcode::Jc(_) => 3,
Opcode::In(_) => 2,
Opcode::Cc(_) => 3,
Opcode::Sbi(_) => 2,
Opcode::Rst3 => 1,
Opcode::Rpo => 1,
Opcode::PopH => 1,
Opcode::Jpo(_) => 3,
Opcode::Xthl => 1,
Opcode::Cpo(_) => 3,
Opcode::PushH => 1,
Opcode::Ani(_) => 2,
Opcode::Rst4 => 1,
Opcode::Rpe => 1,
Opcode::Pchl => 1,
Opcode::Jpe(_) => 3,
Opcode::Xchg => 1,
Opcode::Cpe(_) => 3,
Opcode::Xri(_) => 2,
Opcode::Rst5 => 1,
Opcode::Rp => 1,
Opcode::PopPsw => 1,
Opcode::Jp(_) => 3,
Opcode::Di => 1,
Opcode::Cp(_) => 3,
Opcode::PushPsw => 1,
Opcode::Ori(_) => 2,
Opcode::Rst6 => 1,
Opcode::Rm => 1,
Opcode::Sphl => 1,
Opcode::Jm(_) => 3,
Opcode::Ei => 1,
Opcode::Cm(_) => 3,
Opcode::Cpi(_) => 2,
Opcode::Rst7 => 1,
}
}
}
| 23.796332 | 88 | 0.4236 |
69423a59424471a9a0f854083f3761115a476ba2 | 5,028 | use crate::{
block::BlockStuff, engine_traits::EngineOperations, shard_state::ShardStateStuff
};
use std::{ops::Deref, sync::Arc};
use storage::types::BlockHandle;
use ton_types::{error, fail, Result};
use ton_block::BlockIdExt;
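// Applies `block` on top of its prev block(s): makes sure the prevs are
// applied, loads or computes the resulting shard state and, unless
// `pre_apply`, links the block into the chain and hands it to external DBs.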
pub async fn apply_block(
handle: &Arc<BlockHandle>,
block: &BlockStuff,
mc_seq_no: u32,
engine: &Arc<dyn EngineOperations>,
pre_apply: bool,
) -> Result<()> {
if handle.id() != block.id() {
fail!("Block id mismatch in apply block: {} vs {}", handle.id(), block.id())
}
let prev_ids = block.construct_prev_id()?;
check_prev_blocks(&prev_ids, engine, mc_seq_no, pre_apply).await?;
let shard_state = if handle.has_state() {
engine.load_state(handle.id()).await?
} else {
calc_shard_state(handle, block, &prev_ids, engine).await?
};
if !pre_apply {
set_next_prev_ids(&handle, &prev_ids, engine.deref())?;
engine.process_block_in_ext_db(handle, &block, None, &shard_state).await?;
}
Ok(())
}
// Checks that the prev block(s) are applied, downloading and applying them if needed
async fn check_prev_blocks(
prev_ids: &(BlockIdExt, Option<BlockIdExt>),
engine: &Arc<dyn EngineOperations>,
mc_seq_no: u32,
pre_apply: bool,
) -> Result<()> {
match prev_ids {
(prev1_id, Some(prev2_id)) => {
let mut apply_prev_futures = Vec::with_capacity(2);
apply_prev_futures.push(
engine.clone().download_and_apply_block(&prev1_id, mc_seq_no, pre_apply)
);
apply_prev_futures.push(
engine.clone().download_and_apply_block(&prev2_id, mc_seq_no, pre_apply)
);
futures::future::join_all(apply_prev_futures)
.await
.into_iter()
.find(|r| r.is_err())
.unwrap_or(Ok(()))?;
},
(prev_id, None) => {
engine.clone().download_and_apply_block(&prev_id, mc_seq_no, pre_apply).await?;
}
}
Ok(())
}
// Gets the prev block(s) state and applies the block's Merkle update to calculate the new state
pub async fn calc_shard_state(
handle: &Arc<BlockHandle>,
block: &BlockStuff,
prev_ids: &(BlockIdExt, Option<BlockIdExt>),
engine: &Arc<dyn EngineOperations>
) -> Result<ShardStateStuff> {
log::trace!("calc_shard_state: block: {}", block.id());
let prev_ss_root = match prev_ids {
(prev1, Some(prev2)) => {
let ss1 = engine.clone().wait_state(prev1, None).await?.root_cell().clone();
let ss2 = engine.clone().wait_state(prev2, None).await?.root_cell().clone();
ShardStateStuff::construct_split_root(ss1, ss2)?
},
(prev, None) => {
engine.clone().wait_state(prev, None).await?.root_cell().clone()
}
};
let merkle_update = block
.block()
.read_state_update()?;
let block_id = block.id().clone();
let ss = tokio::task::spawn_blocking(move || -> Result<ShardStateStuff> {
let now = std::time::Instant::now();
let ss_root = merkle_update.apply_for(&prev_ss_root)?;
log::trace!("TIME: calc_shard_state: applied Merkle update {}ms {}",
now.elapsed().as_millis(), block_id);
ShardStateStuff::new(block_id.clone(), ss_root)
}).await??;
engine.store_state(handle, &ss).await?;
Ok(ss)
}
// Sets the next-block links on the prev block(s) and the prev-block links on the current one
pub fn set_next_prev_ids(
handle: &Arc<BlockHandle>,
prev_ids: &(BlockIdExt, Option<BlockIdExt>),
engine: &dyn EngineOperations
) -> Result<()> {
match prev_ids {
(prev_id1, Some(prev_id2)) => {
// After merge
let prev_handle1 = engine.load_block_handle(&prev_id1)?.ok_or_else(
|| error!("Cannot load handle for prev1 block {}", prev_id1)
)?;
engine.store_block_next1(&prev_handle1, handle.id())?;
let prev_handle2 = engine.load_block_handle(&prev_id2)?.ok_or_else(
|| error!("Cannot load handle for prev2 block {}", prev_id2)
)?;
engine.store_block_next1(&prev_handle2, handle.id())?;
engine.store_block_prev1(handle, &prev_id1)?;
engine.store_block_prev2(handle, &prev_id2)?;
},
(prev_id, None) => {
            // If this is just after a split and we are the second ("1" branch) shard, set next2 for the prev block
let prev_shard = prev_id.shard().clone();
let shard = handle.id().shard().clone();
let prev_handle = engine.load_block_handle(&prev_id)?.ok_or_else(
|| error!("Cannot load handle for prev block {}", prev_id)
)?;
if (prev_shard != shard) && (prev_shard.split()?.1 == shard) {
engine.store_block_next2(&prev_handle, handle.id())?;
} else {
engine.store_block_next1(&prev_handle, handle.id())?;
}
engine.store_block_prev1(handle, &prev_id)?;
}
}
Ok(())
}
| 36.434783 | 92 | 0.59467 |
0e5a456344e0a3678db272a2b927881cad15791b | 6,197 | mod rt_util;
mod tree_node;
use crate::rt_util::*;
use lazy_static::lazy_static;
use lrlex::lrlex_mod;
use lrpar::lrpar_mod;
use regex::Regex;
use std::fs;
use std::io::{self, BufRead, ErrorKind, Write};
// Using `lrlex_mod!` brings the lexer for `lexer.l` into scope. By default the
// module name will be `lexer_l` (i.e. the file name, minus any extensions,
// with a suffix of `_l`).
lrlex_mod!("lexer.l");
// Using `lrpar_mod!` brings the parser for `parser.y` into scope. By default the
// module name will be `parser_y` (i.e. the file name, minus any extensions,
// with a suffix of `_y`).
lrpar_mod!("parser.y");
const VERSION: &str = "0.2.6";
lazy_static! {
static ref EXIT: Vec<String> = {
let v = vec!["exit".to_string(), "q".to_string(), "quit".to_string()];
v
};
static ref SUFFIX: Regex = Regex::new("^.*\\.pg$").unwrap();
}
fn info() {
println!(r" _ ");
println!(r" ____ ___ ____ ____ ___ __(_)___ ");
println!(r" / __ \/ _ \/ __ \/ __ `/ / / / / __ \");
println!(r" / /_/ / __/ / / / /_/ / /_/ / / / / /");
println!(r" / .___/\___/_/ /_/\__, /\__,_/_/_/ /_/ ");
println!(r"/_/ /____/ ");
println!("Penguin compiler: version {}", VERSION);
}
fn main() {
info();
#[cfg(feature = "debug")]
{
println!("### debug mode is open. ###");
}
// create directory for graph that user will draw
match fs::create_dir("graph") {
Err(e) => match e.kind() {
ErrorKind::AlreadyExists => {}
_ => eprintln!("!{:?}", e),
},
Ok(_) => {
println!("# Created directory `graph` for output.")
}
}
let runtime = RunTime::new();
let args = std::env::args();
if args.len() < 2 {
// interactive shell mode
shell(runtime);
} else {
// read file and do the interpretation
// second argument is the file name
let mut file = String::new();
for (index, arg) in args.enumerate() {
if index == 1 {
file = arg;
break;
}
}
#[cfg(feature = "debug")]
{
println!("# file is: {}", file);
}
if SUFFIX.is_match(&file) {
crate::file(runtime, fs::read_to_string(file).unwrap().to_lowercase());
} else {
            eprintln!("Error: file format is not supported");
}
}
}
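// Interpret a whole `.pg` source: strip `//` and `--` comments, then run each
// `;`-terminated statement in order.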
fn file(mut rt: RunTime, file: String) {
#[cfg(feature = "debug")]
{
println!("# whole file:");
println!("------------------------------");
println!("{}", file);
println!("------------------------------");
println!("# program launch");
}
let file: String = file
.split('\n')
.filter(|x| !x.is_empty() && !x.starts_with("//") && !x.starts_with("--"))
.map(|x| {
&x[0..match x.find("//") {
None => x.len(),
Some(index) => index,
}]
})
.map(|x| {
&x[0..match x.find("--") {
None => x.len(),
Some(index) => index,
}]
})
.collect();
#[cfg(feature = "debug")]
{
println!("# file delete comment:");
println!("------------------------------");
println!("{}", file);
println!("------------------------------");
}
    // TODO: handle `exit` statements mixed into a file
file.split(';')
.into_iter()
.map(|stat| stat.trim_start().trim_end())
.filter(|stat| !stat.is_empty())
.for_each(|stat| {
#[cfg(feature = "debug")]
{
println!("# file statement: {}", stat);
println!("# len of line: {}", stat.len());
}
let mut stat = stat.to_string();
if !EXIT.contains(&stat) {
stat = format!("{};", stat);
}
rt.run(stat.as_str());
})
}
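// Interactive REPL: read lines from stdin, buffer input until a statement is
// terminated by `;` (or an exit keyword is typed), then run each statement.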
fn shell(mut rt: RunTime) {
let stdin = io::stdin();
let mut gl_input = String::new();
let mut is_continue = false;
loop {
if is_continue {
print!("... ");
} else {
print!(">>> ");
}
io::stdout().flush().ok();
match stdin.lock().lines().next() {
Some(Ok(ref l)) => {
if l.trim().is_empty() {
continue;
}
if l.starts_with("//") || l.starts_with("--") {
continue;
}
// Now we create a lexer with the `lexer` method with which
// we can lex an input.
let l = l[0..match l.find("//") {
None => l.len(),
Some(idx) => idx,
}]
.to_string();
let l = l[0..match l.find("--") {
None => l.len(),
Some(idx) => idx,
}]
.to_string();
// if is not the end of line, continue
gl_input += &l.trim_end().to_lowercase();
if !gl_input.ends_with(';') && !EXIT.contains(&gl_input) {
is_continue = true;
continue;
}
#[cfg(feature = "debug")]
{
println!("global input: {}", gl_input);
}
for v in gl_input.split(';') {
if v.is_empty() {
break;
}
#[cfg(feature = "debug")]
{
println!("v: {}", v);
println!("v's len: {}", v.len());
}
let mut v = String::from(v);
if !EXIT.contains(&v.to_string()) {
v = format!("{};", v);
}
rt.run(v.as_str());
}
// prepare for new input
gl_input.clear();
is_continue = false;
}
_ => break,
}
}
}
| 29.509524 | 83 | 0.404873 |
e4c9ee0d506df766379959ec1f1f059692ebbbec | 8,788 | #[doc = "Register `INTENCLR` reader"]
pub struct R(crate::R<INTENCLR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<INTENCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<INTENCLR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<INTENCLR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `INTENCLR` writer"]
pub struct W(crate::W<INTENCLR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<INTENCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<INTENCLR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<INTENCLR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `DRE` reader - Data Register Empty Interrupt Disable"]
pub struct DRE_R(crate::FieldReader<bool, bool>);
impl DRE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
DRE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DRE_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DRE` writer - Data Register Empty Interrupt Disable"]
pub struct DRE_W<'a> {
w: &'a mut W,
}
impl<'a> DRE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u8 & 0x01);
self.w
}
}
#[doc = "Field `TXC` reader - Transmit Complete Interrupt Disable"]
pub struct TXC_R(crate::FieldReader<bool, bool>);
impl TXC_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
TXC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for TXC_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXC` writer - Transmit Complete Interrupt Disable"]
pub struct TXC_W<'a> {
w: &'a mut W,
}
impl<'a> TXC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u8 & 0x01) << 1);
self.w
}
}
#[doc = "Field `RXC` reader - Receive Complete Interrupt Disable"]
pub struct RXC_R(crate::FieldReader<bool, bool>);
impl RXC_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
RXC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RXC_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXC` writer - Receive Complete Interrupt Disable"]
pub struct RXC_W<'a> {
w: &'a mut W,
}
impl<'a> RXC_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u8 & 0x01) << 2);
self.w
}
}
#[doc = "Field `SSL` reader - Slave Select Low Interrupt Disable"]
pub struct SSL_R(crate::FieldReader<bool, bool>);
impl SSL_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
SSL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SSL_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SSL` writer - Slave Select Low Interrupt Disable"]
pub struct SSL_W<'a> {
w: &'a mut W,
}
impl<'a> SSL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u8 & 0x01) << 3);
self.w
}
}
#[doc = "Field `ERROR` reader - Combined Error Interrupt Disable"]
pub struct ERROR_R(crate::FieldReader<bool, bool>);
impl ERROR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
ERROR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ERROR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ERROR` writer - Combined Error Interrupt Disable"]
pub struct ERROR_W<'a> {
w: &'a mut W,
}
impl<'a> ERROR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u8 & 0x01) << 7);
self.w
}
}
impl R {
#[doc = "Bit 0 - Data Register Empty Interrupt Disable"]
#[inline(always)]
pub fn dre(&self) -> DRE_R {
DRE_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Transmit Complete Interrupt Disable"]
#[inline(always)]
pub fn txc(&self) -> TXC_R {
TXC_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Receive Complete Interrupt Disable"]
#[inline(always)]
pub fn rxc(&self) -> RXC_R {
RXC_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Slave Select Low Interrupt Disable"]
#[inline(always)]
pub fn ssl(&self) -> SSL_R {
SSL_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 7 - Combined Error Interrupt Disable"]
#[inline(always)]
pub fn error(&self) -> ERROR_R {
ERROR_R::new(((self.bits >> 7) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Data Register Empty Interrupt Disable"]
#[inline(always)]
pub fn dre(&mut self) -> DRE_W {
DRE_W { w: self }
}
#[doc = "Bit 1 - Transmit Complete Interrupt Disable"]
#[inline(always)]
pub fn txc(&mut self) -> TXC_W {
TXC_W { w: self }
}
#[doc = "Bit 2 - Receive Complete Interrupt Disable"]
#[inline(always)]
pub fn rxc(&mut self) -> RXC_W {
RXC_W { w: self }
}
#[doc = "Bit 3 - Slave Select Low Interrupt Disable"]
#[inline(always)]
pub fn ssl(&mut self) -> SSL_W {
SSL_W { w: self }
}
#[doc = "Bit 7 - Combined Error Interrupt Disable"]
#[inline(always)]
pub fn error(&mut self) -> ERROR_W {
ERROR_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "SPIS Interrupt Enable Clear\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intenclr](index.html) module"]
pub struct INTENCLR_SPEC;
impl crate::RegisterSpec for INTENCLR_SPEC {
type Ux = u8;
}
#[doc = "`read()` method returns [intenclr::R](R) reader structure"]
impl crate::Readable for INTENCLR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [intenclr::W](W) writer structure"]
impl crate::Writable for INTENCLR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets INTENCLR to value 0"]
impl crate::Resettable for INTENCLR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.099338 | 416 | 0.570323 |
e64614999470f2d02ac72e235be54610324a103b | 666 | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
extern crate lang_items;
fn main() {
panic!("Bye world!")
}
| 30.272727 | 75 | 0.726727 |
2918bb6c5cbb7c16160649948c92c5cfa485fc45 | 3,867 | use crate::operations::*;
use crate::ReadonlyString;
use azure_core::{Pipeline, Request};
use bytes::Bytes;
use super::*;
/// A client for Cosmos attachment resources.
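///
/// Normally obtained by navigating down from a [`DocumentClient`]. A minimal
/// sketch (the `document_client` variable and the attachment name are
/// placeholders, and executing the returned builder is elided):
///
/// ```ignore
/// let attachments = document_client.attachment_client("attachment1");
/// let get_builder = attachments.get();
/// ```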
#[derive(Debug, Clone)]
pub struct AttachmentClient {
document_client: DocumentClient,
attachment_name: ReadonlyString,
}
impl AttachmentClient {
/// Create a new client
pub(crate) fn new<S: Into<ReadonlyString>>(
document_client: DocumentClient,
attachment_name: S,
) -> Self {
Self {
document_client,
attachment_name: attachment_name.into(),
}
}
/// Get a [`CosmosClient`].
pub fn cosmos_client(&self) -> &CosmosClient {
self.document_client().cosmos_client()
}
/// Get a [`DatabaseClient`].
pub fn database_client(&self) -> &DatabaseClient {
self.document_client().database_client()
}
/// Get a [`CollectionClient`].
pub fn collection_client(&self) -> &CollectionClient {
self.document_client().collection_client()
}
/// Get a [`DocumentClient`].
pub fn document_client(&self) -> &DocumentClient {
&self.document_client
}
/// Get the attachment name.
pub fn attachment_name(&self) -> &str {
&self.attachment_name
}
/// Initiate a request to get an attachment.
pub fn get(&self) -> GetAttachmentBuilder {
GetAttachmentBuilder::new(self.clone())
}
/// Initiate a request to delete an attachment.
pub fn delete(&self) -> DeleteAttachmentBuilder {
DeleteAttachmentBuilder::new(self.clone())
}
/// Initiate a request to create an attachment with a slug.
pub fn create_slug(&self, body: Bytes) -> CreateOrReplaceSlugAttachmentBuilder {
CreateOrReplaceSlugAttachmentBuilder::new(self.clone(), true, body)
}
/// Initiate a request to replace an attachment.
pub fn replace_slug(&self, body: Bytes) -> CreateOrReplaceSlugAttachmentBuilder {
CreateOrReplaceSlugAttachmentBuilder::new(self.clone(), false, body)
}
/// Initiate a request to create a reference attachment.
pub fn create_attachment<M, C>(
&self,
media: M,
content_type: C,
) -> CreateOrReplaceAttachmentBuilder
where
M: Into<String>,
C: Into<String>,
{
CreateOrReplaceAttachmentBuilder::new(self.clone(), true, media.into(), content_type.into())
}
/// Initiate a request to replace an attachment.
pub fn replace_attachment<M, C>(
&self,
media: M,
content_type: C,
) -> CreateOrReplaceAttachmentBuilder
where
M: Into<String>,
C: Into<String>,
{
CreateOrReplaceAttachmentBuilder::new(
self.clone(),
false,
media.into(),
content_type.into(),
)
}
pub(crate) fn prepare_pipeline(&self, method: http::Method) -> Request {
self.cosmos_client().prepare_request_pipeline(
&format!(
"dbs/{}/colls/{}/docs/{}/attachments",
self.database_client().database_name(),
self.collection_client().collection_name(),
self.document_client().document_name(),
),
method,
)
}
pub(crate) fn prepare_pipeline_with_attachment_name(&self, method: http::Method) -> Request {
self.cosmos_client().prepare_request_pipeline(
&format!(
"dbs/{}/colls/{}/docs/{}/attachments/{}",
self.database_client().database_name(),
self.collection_client().collection_name(),
self.document_client().document_name(),
self.attachment_name()
),
method,
)
}
pub(crate) fn pipeline(&self) -> &Pipeline {
self.cosmos_client().pipeline()
}
}
| 29.295455 | 100 | 0.602534 |
391ea762a083b003f3b71442b9694e2599aeb75b | 18,468 | use crate::ty::context::TyCtxt;
use crate::ty::query::plumbing::CycleError;
use crate::ty::query::Query;
use crate::ty::tls;
use rustc_data_structures::sync::Lrc;
use syntax_pos::Span;
#[cfg(not(parallel_compiler))]
use std::ptr;
#[cfg(parallel_compiler)]
use {
parking_lot::{Mutex, Condvar},
rustc_data_structures::{jobserver, OnDrop},
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::stable_hasher::{StableHasher, HashStable},
rustc_data_structures::sync::Lock,
rustc_rayon_core as rayon_core,
syntax_pos::DUMMY_SP,
std::{mem, process, thread},
std::iter::FromIterator,
};
/// Indicates the state of a query for a given key in a query map.
pub(super) enum QueryResult<'tcx> {
    /// An already executing query. The query job can be used to wait for its completion.
Started(Lrc<QueryJob<'tcx>>),
/// The query panicked. Queries trying to wait on this will raise a fatal error or
/// silently panic.
Poisoned,
}
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
pub query: Query<'tcx>,
}
/// Represents an active query job.
pub struct QueryJob<'tcx> {
pub info: QueryInfo<'tcx>,
/// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<Lrc<QueryJob<'tcx>>>,
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
latch: QueryLatch<'tcx>,
}
impl<'tcx> QueryJob<'tcx> {
/// Creates a new query job.
pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
QueryJob {
info,
parent,
#[cfg(parallel_compiler)]
latch: QueryLatch::new(),
}
}
    /// Waits for the query job to complete.
#[cfg(parallel_compiler)]
pub(super) fn r#await(
&self,
tcx: TyCtxt<'tcx>,
span: Span,
) -> Result<(), CycleError<'tcx>> {
tls::with_related_context(tcx, move |icx| {
let waiter = Lrc::new(QueryWaiter {
query: icx.query.clone(),
span,
cycle: Lock::new(None),
condvar: Condvar::new(),
});
self.latch.r#await(&waiter);
// FIXME: Get rid of this lock. We have ownership of the QueryWaiter
// although another thread may still have a Lrc reference so we cannot
// use Lrc::get_mut
let mut cycle = waiter.cycle.lock();
match cycle.take() {
None => Ok(()),
Some(cycle) => Err(cycle)
}
})
}
#[cfg(not(parallel_compiler))]
pub(super) fn find_cycle_in_stack(&self, tcx: TyCtxt<'tcx>, span: Span) -> CycleError<'tcx> {
// Get the current executing query (waiter) and find the waitee amongst its parents
let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
let mut cycle = Vec::new();
while let Some(job) = current_job {
cycle.push(job.info.clone());
if ptr::eq(&*job, self) {
cycle.reverse();
// This is the end of the cycle
// The span entry we included was for the usage
// of the cycle itself, and not part of the cycle
// Replace it with the span which caused the cycle to form
cycle[0].span = span;
// Find out why the cycle itself was used
let usage = job.parent.as_ref().map(|parent| {
(job.info.span, parent.info.query.clone())
});
return CycleError { usage, cycle };
}
current_job = job.parent.clone();
}
panic!("did not find a cycle")
}
/// Signals to waiters that the query is complete.
///
/// This does nothing for single threaded rustc,
/// as there are no concurrent jobs which could be waiting on us
pub fn signal_complete(&self) {
#[cfg(parallel_compiler)]
self.latch.set();
}
#[cfg(parallel_compiler)]
fn as_ptr(&self) -> *const QueryJob<'tcx> {
self as *const _
}
}
#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
query: Option<Lrc<QueryJob<'tcx>>>,
condvar: Condvar,
span: Span,
cycle: Lock<Option<CycleError<'tcx>>>,
}
#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
}
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
complete: bool,
waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}
#[cfg(parallel_compiler)]
struct QueryLatch<'tcx> {
info: Mutex<QueryLatchInfo<'tcx>>,
}
#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
fn new() -> Self {
QueryLatch {
info: Mutex::new(QueryLatchInfo {
complete: false,
waiters: Vec::new(),
}),
}
}
    /// Blocks the current thread on this latch until it is set.
fn r#await(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
// the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
// Both of these will remove it from the `waiters` list before resuming
// this thread.
info.waiters.push(waiter.clone());
// If this detects a deadlock and the deadlock handler wants to resume this thread
// we have to be in the `wait` call. This is ensured by the deadlock handler
// getting the self.info lock.
rayon_core::mark_blocked();
jobserver::release_thread();
waiter.condvar.wait(&mut info);
// Release the lock before we potentially block in `acquire_thread`
mem::drop(info);
jobserver::acquire_thread();
}
}
/// Sets the latch and resumes all waiters on it
fn set(&self) {
let mut info = self.info.lock();
debug_assert!(!info.complete);
info.complete = true;
let registry = rayon_core::Registry::current();
for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
}
}
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
fn extract_waiter(
&self,
waiter: usize,
) -> Lrc<QueryWaiter<'tcx>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
info.waiters.remove(waiter)
}
}
/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
#[cfg(parallel_compiler)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(ref parent) = query.parent {
if let Some(cycle) = visit(query.info.span, parent.clone()) {
return Some(cycle);
}
}
// Visit the explicit waiters which use condvars and are resumable
for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
if let Some(ref waiter_query) = waiter.query {
if visit(waiter.span, waiter_query.clone()).is_some() {
// Return a value which indicates that this waiter can be resumed
return Some(Some((query.clone(), i)));
}
}
}
None
}
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
span: Span,
stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> Option<Option<Waiter<'tcx>>> {
if !visited.insert(query.as_ptr()) {
return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
// We detected a query cycle, fix up the initial span and return Some
// Remove previous stack entries
stack.drain(0..p);
// Replace the span for the first query with the cycle cause
stack[0].0 = span;
Some(None)
} else {
None
}
}
    // The query is now marked as visited; add it to the stack
stack.push((span, query.clone()));
// Visit all the waiters
let r = visit_waiters(query, |span, successor| {
cycle_check(successor, span, stack, visited)
});
// Remove the entry in our stack if we didn't find a cycle
if r.is_none() {
stack.pop();
}
r
}
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
query: Lrc<QueryJob<'tcx>>,
visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> bool {
// We already visited this or we're deliberately ignoring it
if !visited.insert(query.as_ptr()) {
return false;
}
// This query is connected to the root (it has no query parent), return true
if query.parent.is_none() {
return true;
}
visit_waiters(query, |_, successor| {
if connected_to_root(successor, visited) {
Some(None)
} else {
None
}
}).is_some()
}
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
tcx: TyCtxt<'tcx>,
queries: &'a [T],
f: F,
) -> &'a T {
// Deterministically pick an entry point
// FIXME: Sort this instead
let mut hcx = tcx.create_stable_hashing_context();
queries.iter().min_by_key(|v| {
let (span, query) = f(v);
let mut stable_hasher = StableHasher::new();
query.info.query.hash_stable(&mut hcx, &mut stable_hasher);
// Prefer entry points which have valid spans for nicer error messages
// We add an integer to the tuple ensuring that entry points
// with valid spans are picked first
let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
(span_cmp, stable_hasher.finish::<u64>())
}).unwrap()
}
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
tcx: TyCtxt<'tcx>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
// Look for a cycle starting with the last query in `jobs`
if let Some(waiter) = cycle_check(jobs.pop().unwrap(),
DUMMY_SP,
&mut stack,
&mut visited) {
// The stack is a vector of pairs of spans and queries; reverse it so that
// the earlier entries require later entries
let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
// Shift the spans so that queries are matched with the span for their waitee
spans.rotate_right(1);
// Zip them back together
let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();
// Remove the queries in our cycle from the list of jobs to look at
for r in &stack {
if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
jobs.remove(pos);
}
}
// Find the queries in the cycle which are
// connected to queries outside the cycle
let entry_points = stack.iter().filter_map(|(span, query)| {
if query.parent.is_none() {
// This query is connected to the root (it has no query parent)
Some((*span, query.clone(), None))
} else {
let mut waiters = Vec::new();
// Find all the direct waiters who lead to the root
visit_waiters(query.clone(), |span, waiter| {
// Mark all the other queries in the cycle as already visited
let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1.as_ptr()));
if connected_to_root(waiter.clone(), &mut visited) {
waiters.push((span, waiter));
}
None
});
if waiters.is_empty() {
None
} else {
// Deterministically pick one of the waiters to show to the user
let waiter = pick_query(tcx, &waiters, |s| s.clone()).clone();
Some((*span, query.clone(), Some(waiter)))
}
}
}).collect::<Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>>();
// Deterministically pick an entry point
let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));
// Shift the stack so that our entry point is first
let entry_point_pos = stack.iter().position(|(_, query)| {
query.as_ptr() == entry_point.as_ptr()
});
if let Some(pos) = entry_point_pos {
stack.rotate_left(pos);
}
let usage = usage.as_ref().map(|(span, query)| (*span, query.info.query.clone()));
// Create the cycle error
let error = CycleError {
usage,
cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
span: s,
query: q.info.query.clone(),
} ).collect(),
};
// We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited on using a query latch
let (waitee_query, waiter_idx) = waiter.unwrap();
// Extract the waiter we want to resume
let waiter = waitee_query.latch.extract_waiter(waiter_idx);
// Set the cycle error so it will be picked up when resumed
*waiter.cycle.lock() = Some(error);
// Put the waiter on the list of things to resume
wakelist.push(waiter);
true
} else {
false
}
}
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
let registry = rayon_core::Registry::current();
let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
gcx_ptr as *const _
});
let gcx_ptr = &*gcx_ptr;
let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
syntax_pos_globals as *const _
});
let syntax_pos_globals = &*syntax_pos_globals;
thread::spawn(move || {
tls::GCX_PTR.set(gcx_ptr, || {
syntax_pos::GLOBALS.set(syntax_pos_globals, || {
syntax_pos::GLOBALS.set(syntax_pos_globals, || {
tls::with_thread_locals(|| {
                        tls::with_global(|tcx| deadlock(tcx, &registry))
})
})
})
})
});
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_>, registry: &rayon_core::Registry) {
let on_panic = OnDrop(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
});
let mut wakelist = Vec::new();
let mut jobs: Vec<_> = tcx.queries.collect_active_jobs();
let mut found_cycle = false;
    while !jobs.is_empty() {
if remove_cycle(&mut jobs, &mut wakelist, tcx) {
found_cycle = true;
}
}
// Check that a cycle was found. It is possible for a deadlock to occur without
// a query cycle if a query which can be waited on uses Rayon to do multithreading
// internally. Such a query (X) may be executing on 2 threads (A and B) and A may
// wait using Rayon on B. Rayon may then switch to executing another query (Y)
// which in turn will wait on X causing a deadlock. We have a false dependency from
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
assert!(found_cycle);
// FIXME: Ensure this won't cause a deadlock before we return
for waiter in wakelist.into_iter() {
waiter.notify(registry);
}
on_panic.disable();
}
| 35.515385 | 99 | 0.593784 |
33caedfc198260f846c51c3f6fbabe54d69ee932 | 18,104 | //! Lints, aka compiler warnings.
//!
//! A 'lint' check is a kind of miscellaneous constraint that a user _might_
//! want to enforce, but might reasonably want to permit as well, on a
//! module-by-module basis. They contrast with static constraints enforced by
//! other phases of the compiler, which are generally required to hold in order
//! to compile the program at all.
//!
//! Most lints can be written as [LintPass] instances. These run after
//! all other analyses. The `LintPass`es built into rustc are defined
//! within [rustc_session::lint::builtin],
//! which has further comments on how to add such a lint.
//! rustc can also load user-defined lint plugins via the plugin mechanism.
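//!
//! As a rough sketch (illustrative only — the lint name and message here are
//! made up), a minimal early lint in this crate looks like:
//!
//! ```ignore (illustrative)
//! declare_lint! {
//!     pub EXAMPLE_LINT,
//!     Warn,
//!     "example description of what the lint catches"
//! }
//!
//! declare_lint_pass!(ExampleLint => [EXAMPLE_LINT]);
//!
//! impl EarlyLintPass for ExampleLint {
//!     fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
//!         // inspect `expr` and report via `cx.struct_span_lint(...)`
//!     }
//! }
//! ```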
//!
//! Some of rustc's lints are defined elsewhere in the compiler and work by
//! calling `add_lint()` on the overall `Session` object. This works when
//! it happens before the main lint pass, which emits the lints stored by
//! `add_lint()`. To emit lints after the main lint pass (from codegen, for
//! example) requires more effort. See `emit_lint` and `GatherNodeLevels`
//! in `context.rs`.
//!
//! Some code also exists in [rustc_session::lint], [rustc_middle::lint].
//!
//! ## Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![cfg_attr(test, feature(test))]
#![feature(array_windows)]
#![feature(bool_to_option)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(iter_order_by)]
#![feature(never_type)]
#![feature(nll)]
#![feature(or_patterns)]
#![recursion_limit = "256"]
#[macro_use]
extern crate rustc_middle;
#[macro_use]
extern crate rustc_session;
mod array_into_iter;
pub mod builtin;
mod context;
mod early;
mod internal;
mod late;
mod levels;
mod non_ascii_idents;
mod nonstandard_style;
mod passes;
mod redundant_semicolon;
mod types;
mod unused;
use rustc_ast as ast;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::{
BARE_TRAIT_OBJECTS, BROKEN_INTRA_DOC_LINKS, ELIDED_LIFETIMES_IN_PATHS,
EXPLICIT_OUTLIVES_REQUIREMENTS, INVALID_CODEBLOCK_ATTRIBUTES, MISSING_DOC_CODE_EXAMPLES,
PRIVATE_DOC_TESTS,
};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::Span;
use array_into_iter::ArrayIntoIter;
use builtin::*;
use internal::*;
use non_ascii_idents::*;
use nonstandard_style::*;
use redundant_semicolon::*;
use types::*;
use unused::*;
/// Useful for other parts of the compiler / Clippy.
pub use builtin::SoftLints;
pub use context::{CheckLintNameResult, EarlyContext, LateContext, LintContext, LintStore};
pub use early::check_ast_crate;
pub use late::check_crate;
pub use passes::{EarlyLintPass, LateLintPass};
pub use rustc_session::lint::Level::{self, *};
pub use rustc_session::lint::{BufferedEarlyLint, FutureIncompatibleInfo, Lint, LintId};
pub use rustc_session::lint::{LintArray, LintPass};
pub fn provide(providers: &mut Providers) {
levels::provide(providers);
*providers = Providers { lint_mod, ..*providers };
}
fn lint_mod(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
late::late_lint_mod(tcx, module_def_id, BuiltinCombinedModuleLateLintPass::new());
}
macro_rules! pre_expansion_lint_passes {
($macro:path, $args:tt) => {
$macro!($args, [KeywordIdents: KeywordIdents,]);
};
}
macro_rules! early_lint_passes {
($macro:path, $args:tt) => {
$macro!(
$args,
[
UnusedParens: UnusedParens,
UnusedBraces: UnusedBraces,
UnusedImportBraces: UnusedImportBraces,
UnsafeCode: UnsafeCode,
AnonymousParameters: AnonymousParameters,
EllipsisInclusiveRangePatterns: EllipsisInclusiveRangePatterns::default(),
NonCamelCaseTypes: NonCamelCaseTypes,
DeprecatedAttr: DeprecatedAttr::new(),
WhileTrue: WhileTrue,
NonAsciiIdents: NonAsciiIdents,
IncompleteFeatures: IncompleteFeatures,
RedundantSemicolons: RedundantSemicolons,
UnusedDocComment: UnusedDocComment,
]
);
};
}
macro_rules! declare_combined_early_pass {
([$name:ident], $passes:tt) => (
early_lint_methods!(declare_combined_early_lint_pass, [pub $name, $passes]);
)
}
pre_expansion_lint_passes!(declare_combined_early_pass, [BuiltinCombinedPreExpansionLintPass]);
early_lint_passes!(declare_combined_early_pass, [BuiltinCombinedEarlyLintPass]);
macro_rules! late_lint_passes {
($macro:path, $args:tt) => {
$macro!(
$args,
[
                // FIXME: Look into the regression when this is used as a module lint.
                // It may depend on constants elsewhere.
UnusedBrokenConst: UnusedBrokenConst,
                // Uses attr::is_used, which is untracked, so it can't be an incremental module pass.
UnusedAttributes: UnusedAttributes::new(),
// Needs to run after UnusedAttributes as it marks all `feature` attributes as used.
UnstableFeatures: UnstableFeatures,
// Tracks state across modules
UnnameableTestItems: UnnameableTestItems::new(),
// Tracks attributes of parents
MissingDoc: MissingDoc::new(),
// Depends on access levels
// FIXME: Turn the computation of types which implement Debug into a query
// and change this to a module lint pass
MissingDebugImplementations: MissingDebugImplementations::default(),
ArrayIntoIter: ArrayIntoIter,
ClashingExternDeclarations: ClashingExternDeclarations::new(),
]
);
};
}
macro_rules! late_lint_mod_passes {
($macro:path, $args:tt) => {
$macro!(
$args,
[
HardwiredLints: HardwiredLints,
ImproperCTypesDeclarations: ImproperCTypesDeclarations,
ImproperCTypesDefinitions: ImproperCTypesDefinitions,
VariantSizeDifferences: VariantSizeDifferences,
BoxPointers: BoxPointers,
PathStatements: PathStatements,
// Depends on referenced function signatures in expressions
UnusedResults: UnusedResults,
NonUpperCaseGlobals: NonUpperCaseGlobals,
NonShorthandFieldPatterns: NonShorthandFieldPatterns,
UnusedAllocation: UnusedAllocation,
// Depends on types used in type definitions
MissingCopyImplementations: MissingCopyImplementations,
// Depends on referenced function signatures in expressions
MutableTransmutes: MutableTransmutes,
TypeAliasBounds: TypeAliasBounds,
TrivialConstraints: TrivialConstraints,
TypeLimits: TypeLimits::new(),
NonSnakeCase: NonSnakeCase,
InvalidNoMangleItems: InvalidNoMangleItems,
// Depends on access levels
UnreachablePub: UnreachablePub,
ExplicitOutlivesRequirements: ExplicitOutlivesRequirements,
InvalidValue: InvalidValue,
]
);
};
}
macro_rules! declare_combined_late_pass {
([$v:vis $name:ident], $passes:tt) => (
late_lint_methods!(declare_combined_late_lint_pass, [$v $name, $passes], ['tcx]);
)
}
// FIXME: Make a separate lint type which does not require typeck tables
late_lint_passes!(declare_combined_late_pass, [pub BuiltinCombinedLateLintPass]);
late_lint_mod_passes!(declare_combined_late_pass, [BuiltinCombinedModuleLateLintPass]);
pub fn new_lint_store(no_interleave_lints: bool, internal_lints: bool) -> LintStore {
let mut lint_store = LintStore::new();
register_builtins(&mut lint_store, no_interleave_lints);
if internal_lints {
register_internals(&mut lint_store);
}
lint_store
}
/// Tell the `LintStore` about all the built-in lints (the ones
/// defined in this crate and the ones defined in
/// `rustc_session::lint::builtin`).
fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) {
macro_rules! add_lint_group {
($name:expr, $($lint:ident),*) => (
store.register_group(false, $name, None, vec![$(LintId::of($lint)),*]);
)
}
macro_rules! register_pass {
($method:ident, $ty:ident, $constructor:expr) => {
store.register_lints(&$ty::get_lints());
store.$method(|| box $constructor);
};
}
macro_rules! register_passes {
($method:ident, [$($passes:ident: $constructor:expr,)*]) => (
$(
register_pass!($method, $passes, $constructor);
)*
)
}
if no_interleave_lints {
pre_expansion_lint_passes!(register_passes, register_pre_expansion_pass);
early_lint_passes!(register_passes, register_early_pass);
late_lint_passes!(register_passes, register_late_pass);
late_lint_mod_passes!(register_passes, register_late_mod_pass);
} else {
store.register_lints(&BuiltinCombinedPreExpansionLintPass::get_lints());
store.register_lints(&BuiltinCombinedEarlyLintPass::get_lints());
store.register_lints(&BuiltinCombinedModuleLateLintPass::get_lints());
store.register_lints(&BuiltinCombinedLateLintPass::get_lints());
}
add_lint_group!(
"nonstandard_style",
NON_CAMEL_CASE_TYPES,
NON_SNAKE_CASE,
NON_UPPER_CASE_GLOBALS
);
add_lint_group!(
"unused",
UNUSED_IMPORTS,
UNUSED_VARIABLES,
UNUSED_ASSIGNMENTS,
DEAD_CODE,
UNUSED_MUT,
UNREACHABLE_CODE,
UNREACHABLE_PATTERNS,
OVERLAPPING_PATTERNS,
UNUSED_MUST_USE,
UNUSED_UNSAFE,
PATH_STATEMENTS,
UNUSED_ATTRIBUTES,
UNUSED_MACROS,
UNUSED_ALLOCATION,
UNUSED_DOC_COMMENTS,
UNUSED_EXTERN_CRATES,
UNUSED_FEATURES,
UNUSED_LABELS,
UNUSED_PARENS,
UNUSED_BRACES,
REDUNDANT_SEMICOLONS
);
add_lint_group!(
"rust_2018_idioms",
BARE_TRAIT_OBJECTS,
UNUSED_EXTERN_CRATES,
ELLIPSIS_INCLUSIVE_RANGE_PATTERNS,
ELIDED_LIFETIMES_IN_PATHS,
EXPLICIT_OUTLIVES_REQUIREMENTS // FIXME(#52665, #47816) not always applicable and not all
// macros are ready for this yet.
// UNREACHABLE_PUB,
// FIXME macro crates are not up for this yet, too much
// breakage is seen if we try to encourage this lint.
// MACRO_USE_EXTERN_CRATE
);
add_lint_group!(
"rustdoc",
BROKEN_INTRA_DOC_LINKS,
PRIVATE_INTRA_DOC_LINKS,
INVALID_CODEBLOCK_ATTRIBUTES,
MISSING_DOC_CODE_EXAMPLES,
PRIVATE_DOC_TESTS
);
// Register renamed and removed lints.
store.register_renamed("single_use_lifetime", "single_use_lifetimes");
store.register_renamed("elided_lifetime_in_path", "elided_lifetimes_in_paths");
store.register_renamed("bare_trait_object", "bare_trait_objects");
store.register_renamed("unstable_name_collision", "unstable_name_collisions");
store.register_renamed("unused_doc_comment", "unused_doc_comments");
store.register_renamed("async_idents", "keyword_idents");
store.register_renamed("exceeding_bitshifts", "arithmetic_overflow");
store.register_renamed("redundant_semicolon", "redundant_semicolons");
store.register_renamed("intra_doc_link_resolution_failure", "broken_intra_doc_links");
store.register_removed("unknown_features", "replaced by an error");
store.register_removed("unsigned_negation", "replaced by negate_unsigned feature gate");
store.register_removed("negate_unsigned", "cast a signed value instead");
store.register_removed("raw_pointer_derive", "using derive with raw pointers is ok");
// Register lint group aliases.
store.register_group_alias("nonstandard_style", "bad_style");
// This was renamed to `raw_pointer_derive`, which was then removed,
// so it is also considered removed.
store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok");
store.register_removed("drop_with_repr_extern", "drop flags have been removed");
store.register_removed("fat_ptr_transmutes", "was accidentally removed back in 2014");
store.register_removed("deprecated_attr", "use `deprecated` instead");
store.register_removed(
"transmute_from_fn_item_types",
"always cast functions before transmuting them",
);
store.register_removed(
"hr_lifetime_in_assoc_type",
"converted into hard error, see issue #33685 \
<https://github.com/rust-lang/rust/issues/33685> for more information",
);
store.register_removed(
"inaccessible_extern_crate",
"converted into hard error, see issue #36886 \
<https://github.com/rust-lang/rust/issues/36886> for more information",
);
store.register_removed(
"super_or_self_in_global_path",
"converted into hard error, see issue #36888 \
<https://github.com/rust-lang/rust/issues/36888> for more information",
);
store.register_removed(
"overlapping_inherent_impls",
"converted into hard error, see issue #36889 \
<https://github.com/rust-lang/rust/issues/36889> for more information",
);
store.register_removed(
"illegal_floating_point_constant_pattern",
"converted into hard error, see issue #36890 \
<https://github.com/rust-lang/rust/issues/36890> for more information",
);
store.register_removed(
"illegal_struct_or_enum_constant_pattern",
"converted into hard error, see issue #36891 \
<https://github.com/rust-lang/rust/issues/36891> for more information",
);
store.register_removed(
"lifetime_underscore",
"converted into hard error, see issue #36892 \
<https://github.com/rust-lang/rust/issues/36892> for more information",
);
store.register_removed(
"extra_requirement_in_impl",
"converted into hard error, see issue #37166 \
<https://github.com/rust-lang/rust/issues/37166> for more information",
);
store.register_removed(
"legacy_imports",
"converted into hard error, see issue #38260 \
<https://github.com/rust-lang/rust/issues/38260> for more information",
);
store.register_removed(
"coerce_never",
"converted into hard error, see issue #48950 \
<https://github.com/rust-lang/rust/issues/48950> for more information",
);
store.register_removed(
"resolve_trait_on_defaulted_unit",
"converted into hard error, see issue #48950 \
<https://github.com/rust-lang/rust/issues/48950> for more information",
);
store.register_removed(
"private_no_mangle_fns",
"no longer a warning, `#[no_mangle]` functions always exported",
);
store.register_removed(
"private_no_mangle_statics",
"no longer a warning, `#[no_mangle]` statics always exported",
);
store.register_removed("bad_repr", "replaced with a generic attribute input check");
store.register_removed(
"duplicate_matcher_binding_name",
"converted into hard error, see issue #57742 \
<https://github.com/rust-lang/rust/issues/57742> for more information",
);
store.register_removed(
"incoherent_fundamental_impls",
"converted into hard error, see issue #46205 \
<https://github.com/rust-lang/rust/issues/46205> for more information",
);
store.register_removed(
"legacy_constructor_visibility",
"converted into hard error, see issue #39207 \
<https://github.com/rust-lang/rust/issues/39207> for more information",
);
store.register_removed(
"legacy_directory_ownership",
"converted into hard error, see issue #37872 \
<https://github.com/rust-lang/rust/issues/37872> for more information",
);
store.register_removed(
"safe_extern_statics",
"converted into hard error, see issue #36247 \
<https://github.com/rust-lang/rust/issues/36247> for more information",
);
store.register_removed(
"parenthesized_params_in_types_and_modules",
"converted into hard error, see issue #42238 \
<https://github.com/rust-lang/rust/issues/42238> for more information",
);
store.register_removed(
"duplicate_macro_exports",
"converted into hard error, see issue #35896 \
<https://github.com/rust-lang/rust/issues/35896> for more information",
);
store.register_removed(
"nested_impl_trait",
"converted into hard error, see issue #59014 \
<https://github.com/rust-lang/rust/issues/59014> for more information",
);
store.register_removed("plugin_as_library", "plugins have been deprecated and retired");
}
fn register_internals(store: &mut LintStore) {
store.register_lints(&DefaultHashTypes::get_lints());
store.register_early_pass(|| box DefaultHashTypes::new());
store.register_lints(&LintPassImpl::get_lints());
store.register_early_pass(|| box LintPassImpl);
store.register_lints(&TyTyKind::get_lints());
store.register_late_pass(|| box TyTyKind);
store.register_group(
false,
"rustc::internal",
None,
vec![
LintId::of(DEFAULT_HASH_TYPES),
LintId::of(USAGE_OF_TY_TYKIND),
LintId::of(LINT_PASS_IMPL_WITHOUT_MACRO),
LintId::of(TY_PASS_BY_REFERENCE),
LintId::of(USAGE_OF_QUALIFIED_TY),
],
);
}
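
// Illustrative sketch (hypothetical lint names, compiled only under
// `cfg(test)`): the minimal declare-and-register flow for a new lint,
// mirroring `register_internals` above. The pieces are (1) declare the
// lint, (2) declare a pass that owns it, (3) implement the early or late
// pass trait, and (4) hand both to the `LintStore`.
#[cfg(test)]
mod registration_sketch {
    use super::*;

    declare_lint! {
        pub EXAMPLE_LINT,
        Warn,
        "an illustrative lint used only to demonstrate registration"
    }

    declare_lint_pass!(ExamplePass => [EXAMPLE_LINT]);

    // All `EarlyLintPass` methods have default empty bodies, so a pass
    // that does nothing is a valid starting point.
    impl EarlyLintPass for ExamplePass {}

    #[allow(dead_code)]
    fn register_example(store: &mut LintStore) {
        store.register_lints(&ExamplePass::get_lints());
        store.register_early_pass(|| box ExamplePass);
    }
}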
| 38.766595 | 100 | 0.66593 |
019da1f1b144e755a1b24ae99144b9dd90ec6f20 | 3,960 | //! This benchmark lives here because I don't want to implement `Resolve` again
//! and again.
#![feature(test)]
extern crate test;
use anyhow::Error;
use spack::resolvers::NodeResolver;
use std::{
collections::HashMap,
hint::black_box,
path::{Path, PathBuf},
};
use swc_atoms::js_word;
use swc_bundler::{Bundler, Load, ModuleData, ModuleRecord};
use swc_common::{sync::Lrc, FileName, SourceMap, Span, GLOBALS};
use swc_ecma_ast::*;
use swc_ecma_parser::{lexer::Lexer, JscTarget, Parser, StringInput, Syntax, TsConfig};
use swc_ecma_transforms::typescript::strip;
use swc_ecma_visit::FoldWith;
use test::Bencher;
#[bench]
#[ignore]
fn three_js(b: &mut Bencher) {
let dir = PathBuf::new()
.join("..")
.join("integration-tests")
.join("three-js")
.join("repo");
run_bench(b, &dir.join("src").join("Three.js"));
}
fn run_bench(b: &mut Bencher, entry: &Path) {
::testing::run_test2(false, |cm, _| {
b.iter(|| {
GLOBALS.with(|globals| {
let bundler = Bundler::new(
globals,
cm.clone(),
Loader { cm: cm.clone() },
NodeResolver::new(),
swc_bundler::Config {
..Default::default()
},
Box::new(Hook),
);
let mut entries = HashMap::new();
entries.insert("main".to_string(), FileName::Real(entry.to_path_buf()));
black_box(bundler.bundle(entries).unwrap());
});
});
Ok(())
})
.unwrap();
}
struct Loader {
cm: Lrc<SourceMap>,
}
impl Load for Loader {
fn load(&self, f: &FileName) -> Result<ModuleData, Error> {
let tsx;
let fm = match f {
FileName::Real(path) => {
tsx = path.to_string_lossy().ends_with(".tsx");
self.cm.load_file(&path)?
}
_ => unreachable!(),
};
let lexer = Lexer::new(
Syntax::Typescript(TsConfig {
decorators: true,
tsx,
..Default::default()
}),
JscTarget::Es2020,
StringInput::from(&*fm),
None,
);
let mut parser = Parser::new_from(lexer);
let module = parser.parse_module().unwrap();
let module = module.fold_with(&mut strip());
Ok(ModuleData {
fm,
module,
helpers: Default::default(),
})
}
}
struct Hook;
impl swc_bundler::Hook for Hook {
fn get_import_meta_props(
&self,
span: Span,
module_record: &ModuleRecord,
) -> Result<Vec<KeyValueProp>, Error> {
Ok(vec![
KeyValueProp {
key: PropName::Ident(Ident::new(js_word!("url"), span)),
value: Box::new(Expr::Lit(Lit::Str(Str {
span,
value: module_record.file_name.to_string().into(),
has_escape: false,
kind: Default::default(),
}))),
},
KeyValueProp {
key: PropName::Ident(Ident::new(js_word!("main"), span)),
value: Box::new(if module_record.is_entry {
Expr::Member(MemberExpr {
span,
obj: ExprOrSuper::Expr(Box::new(Expr::MetaProp(MetaPropExpr {
meta: Ident::new(js_word!("import"), span),
prop: Ident::new(js_word!("meta"), span),
}))),
prop: Box::new(Expr::Ident(Ident::new(js_word!("main"), span))),
computed: false,
})
} else {
Expr::Lit(Lit::Bool(Bool { span, value: false }))
}),
},
])
}
}
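
// A note on the hook above (behavior read off the code, not from swc docs):
// every module gets `import.meta.url` inlined as a string literal built from
// its file name, while `import.meta.main` stays a live member expression for
// the entry module and becomes the literal `false` for every other module in
// the bundle.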
| 29.333333 | 88 | 0.479293 |
8f300cb88d247f4119c0cbf37d0b880b2119dd50 | 189,911 | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn endpoints(&self) -> endpoints::Client {
endpoints::Client(self.clone())
}
pub fn experiments(&self) -> experiments::Client {
experiments::Client(self.clone())
}
pub fn front_doors(&self) -> front_doors::Client {
front_doors::Client(self.clone())
}
pub fn frontend_endpoints(&self) -> frontend_endpoints::Client {
frontend_endpoints::Client(self.clone())
}
pub fn managed_rule_sets(&self) -> managed_rule_sets::Client {
managed_rule_sets::Client(self.clone())
}
pub fn network_experiment_profiles(&self) -> network_experiment_profiles::Client {
network_experiment_profiles::Client(self.clone())
}
pub fn policies(&self) -> policies::Client {
policies::Client(self.clone())
}
pub fn preconfigured_endpoints(&self) -> preconfigured_endpoints::Client {
preconfigured_endpoints::Client(self.clone())
}
pub fn reports(&self) -> reports::Client {
reports::Client(self.clone())
}
pub fn rules_engines(&self) -> rules_engines::Client {
rules_engines::Client(self.clone())
}
}
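// Usage sketch (illustrative, not part of the generated surface): build a
// client from any `TokenCredential` implementation and list the Network
// Experiment Profiles in a subscription. The subscription id below is a
// placeholder.
#[allow(dead_code)]
async fn example_list_profiles(
    credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
) -> Result<models::ProfileList, network_experiment_profiles::list::Error> {
    let client = ClientBuilder::new(credential).build();
    client
        .network_experiment_profiles()
        .list("00000000-0000-0000-0000-000000000000")
        .into_future()
        .await
}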
#[non_exhaustive]
#[derive(Debug, thiserror::Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
NetworkExperimentProfiles_List(#[from] network_experiment_profiles::list::Error),
#[error(transparent)]
NetworkExperimentProfiles_ListByResourceGroup(#[from] network_experiment_profiles::list_by_resource_group::Error),
#[error(transparent)]
NetworkExperimentProfiles_Get(#[from] network_experiment_profiles::get::Error),
#[error(transparent)]
NetworkExperimentProfiles_CreateOrUpdate(#[from] network_experiment_profiles::create_or_update::Error),
#[error(transparent)]
NetworkExperimentProfiles_Update(#[from] network_experiment_profiles::update::Error),
#[error(transparent)]
NetworkExperimentProfiles_Delete(#[from] network_experiment_profiles::delete::Error),
#[error(transparent)]
PreconfiguredEndpoints_List(#[from] preconfigured_endpoints::list::Error),
#[error(transparent)]
Experiments_ListByProfile(#[from] experiments::list_by_profile::Error),
#[error(transparent)]
Experiments_Get(#[from] experiments::get::Error),
#[error(transparent)]
Experiments_CreateOrUpdate(#[from] experiments::create_or_update::Error),
#[error(transparent)]
Experiments_Update(#[from] experiments::update::Error),
#[error(transparent)]
Experiments_Delete(#[from] experiments::delete::Error),
#[error(transparent)]
Reports_GetLatencyScorecards(#[from] reports::get_latency_scorecards::Error),
#[error(transparent)]
Reports_GetTimeseries(#[from] reports::get_timeseries::Error),
#[error(transparent)]
CheckFrontDoorNameAvailability(#[from] check_front_door_name_availability::Error),
#[error(transparent)]
CheckFrontDoorNameAvailabilityWithSubscription(#[from] check_front_door_name_availability_with_subscription::Error),
#[error(transparent)]
FrontDoors_List(#[from] front_doors::list::Error),
#[error(transparent)]
FrontDoors_ListByResourceGroup(#[from] front_doors::list_by_resource_group::Error),
#[error(transparent)]
FrontDoors_Get(#[from] front_doors::get::Error),
#[error(transparent)]
FrontDoors_CreateOrUpdate(#[from] front_doors::create_or_update::Error),
#[error(transparent)]
FrontDoors_Delete(#[from] front_doors::delete::Error),
#[error(transparent)]
FrontendEndpoints_ListByFrontDoor(#[from] frontend_endpoints::list_by_front_door::Error),
#[error(transparent)]
FrontendEndpoints_Get(#[from] frontend_endpoints::get::Error),
#[error(transparent)]
Endpoints_PurgeContent(#[from] endpoints::purge_content::Error),
#[error(transparent)]
FrontendEndpoints_EnableHttps(#[from] frontend_endpoints::enable_https::Error),
#[error(transparent)]
FrontendEndpoints_DisableHttps(#[from] frontend_endpoints::disable_https::Error),
#[error(transparent)]
FrontDoors_ValidateCustomDomain(#[from] front_doors::validate_custom_domain::Error),
#[error(transparent)]
RulesEngines_ListByFrontDoor(#[from] rules_engines::list_by_front_door::Error),
#[error(transparent)]
RulesEngines_Get(#[from] rules_engines::get::Error),
#[error(transparent)]
RulesEngines_CreateOrUpdate(#[from] rules_engines::create_or_update::Error),
#[error(transparent)]
RulesEngines_Delete(#[from] rules_engines::delete::Error),
#[error(transparent)]
Policies_List(#[from] policies::list::Error),
#[error(transparent)]
Policies_Get(#[from] policies::get::Error),
#[error(transparent)]
Policies_CreateOrUpdate(#[from] policies::create_or_update::Error),
#[error(transparent)]
Policies_Delete(#[from] policies::delete::Error),
#[error(transparent)]
ManagedRuleSets_List(#[from] managed_rule_sets::list::Error),
}
pub mod network_experiment_profiles {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of Network Experiment Profiles under a subscription"]
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Gets a list of Network Experiment Profiles within a resource group under a subscription"]
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
#[doc = "Gets an NetworkExperiment Profile by ProfileName"]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
}
}
#[doc = "Creates an NetworkExperiment Profile"]
pub fn create_or_update(
&self,
profile_name: impl Into<String>,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
parameters: impl Into<models::Profile>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
profile_name: profile_name.into(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Updates an NetworkExperimentProfiles by NetworkExperimentProfile name"]
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
parameters: impl Into<models::ProfileUpdateModel>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes an NetworkExperiment Profile by ProfileName"]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProfileList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/NetworkExperimentProfiles",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProfileList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ProfileList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ProfileList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Profile, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Profile),
Created201(models::Profile),
Accepted202(models::Profile),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) profile_name: String,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) parameters: models::Profile,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
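    // Usage sketch (hypothetical helper, not generated): `create_or_update`
    // may legally answer 200, 201, or 202 for the same call, so callers
    // match the tri-state `Response`; a 202 means the service accepted the
    // request and is still applying it. All three arms carry the profile.
    #[allow(dead_code)]
    async fn example_create_profile(
        builder: create_or_update::Builder,
    ) -> Result<models::Profile, create_or_update::Error> {
        match builder.into_future().await? {
            create_or_update::Response::Ok200(profile)
            | create_or_update::Response::Created201(profile)
            | create_or_update::Response::Accepted202(profile) => Ok(profile),
        }
    }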
pub mod update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Profile),
Accepted202(models::Profile),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) parameters: models::ProfileUpdateModel,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Profile =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod preconfigured_endpoints {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of Preconfigured Endpoints"]
pub fn list(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::PreconfiguredEndpointList, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/PreconfiguredEndpoints",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.profile_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::PreconfiguredEndpointList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod experiments {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of Experiments"]
pub fn list_by_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
) -> list_by_profile::Builder {
list_by_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
}
}
#[doc = "Gets an Experiment by ExperimentName"]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
}
}
#[doc = "Creates or updates an Experiment"]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
parameters: impl Into<models::Experiment>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Updates an Experiment by Experiment id"]
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
parameters: impl Into<models::ExperimentUpdateModel>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes an Experiment"]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
}
}
}
pub mod list_by_profile {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ExperimentList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ExperimentList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Experiment, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name,
&self.experiment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Experiment),
Created201(models::Experiment),
Accepted202(models::Experiment),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
pub(crate) parameters: models::Experiment,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name,
&self.experiment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::Experiment),
Accepted202(models::Experiment),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
pub(crate) parameters: models::ExperimentUpdateModel,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name,
&self.experiment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Experiment =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.profile_name,
&self.experiment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod reports {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a Latency Scorecard for a given Experiment"]
pub fn get_latency_scorecards(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
aggregation_interval: impl Into<String>,
) -> get_latency_scorecards::Builder {
get_latency_scorecards::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
aggregation_interval: aggregation_interval.into(),
end_date_time_utc: None,
country: None,
}
}
#[doc = "Gets a Timeseries for a given Experiment"]
pub fn get_timeseries(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
profile_name: impl Into<String>,
experiment_name: impl Into<String>,
start_date_time_utc: impl Into<String>,
end_date_time_utc: impl Into<String>,
aggregation_interval: impl Into<String>,
timeseries_type: impl Into<String>,
) -> get_timeseries::Builder {
get_timeseries::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
profile_name: profile_name.into(),
experiment_name: experiment_name.into(),
start_date_time_utc: start_date_time_utc.into(),
end_date_time_utc: end_date_time_utc.into(),
aggregation_interval: aggregation_interval.into(),
timeseries_type: timeseries_type.into(),
endpoint: None,
country: None,
}
}
}
pub mod get_latency_scorecards {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
pub(crate) aggregation_interval: String,
pub(crate) end_date_time_utc: Option<String>,
pub(crate) country: Option<String>,
}
impl Builder {
pub fn end_date_time_utc(mut self, end_date_time_utc: impl Into<String>) -> Self {
self.end_date_time_utc = Some(end_date_time_utc.into());
self
}
pub fn country(mut self, country: impl Into<String>) -> Self {
self.country = Some(country.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::LatencyScorecard, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}/LatencyScorecard",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.profile_name,
                        &self.experiment_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
if let Some(end_date_time_utc) = &self.end_date_time_utc {
url.query_pairs_mut().append_pair("endDateTimeUTC", end_date_time_utc);
}
if let Some(country) = &self.country {
url.query_pairs_mut().append_pair("country", country);
}
let aggregation_interval = &self.aggregation_interval;
url.query_pairs_mut().append_pair("aggregationInterval", aggregation_interval);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::LatencyScorecard =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_timeseries {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) profile_name: String,
pub(crate) experiment_name: String,
pub(crate) start_date_time_utc: String,
pub(crate) end_date_time_utc: String,
pub(crate) aggregation_interval: String,
pub(crate) timeseries_type: String,
pub(crate) endpoint: Option<String>,
pub(crate) country: Option<String>,
}
impl Builder {
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn country(mut self, country: impl Into<String>) -> Self {
self.country = Some(country.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Timeseries, Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/NetworkExperimentProfiles/{}/Experiments/{}/Timeseries",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.profile_name,
                        &self.experiment_name
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2019-11-01");
let start_date_time_utc = &self.start_date_time_utc;
url.query_pairs_mut().append_pair("startDateTimeUTC", start_date_time_utc);
let end_date_time_utc = &self.end_date_time_utc;
url.query_pairs_mut().append_pair("endDateTimeUTC", end_date_time_utc);
let aggregation_interval = &self.aggregation_interval;
url.query_pairs_mut().append_pair("aggregationInterval", aggregation_interval);
let timeseries_type = &self.timeseries_type;
url.query_pairs_mut().append_pair("timeseriesType", timeseries_type);
if let Some(endpoint) = &self.endpoint {
url.query_pairs_mut().append_pair("endpoint", endpoint);
}
if let Some(country) = &self.country {
url.query_pairs_mut().append_pair("country", country);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Timeseries =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
impl Client {
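    #[doc = "Checks whether a Front Door name (subdomain) is available for use, without requiring a subscription context."]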
pub fn check_front_door_name_availability(
&self,
check_front_door_name_availability_input: impl Into<models::CheckNameAvailabilityInput>,
) -> check_front_door_name_availability::Builder {
check_front_door_name_availability::Builder {
client: self.clone(),
check_front_door_name_availability_input: check_front_door_name_availability_input.into(),
}
}
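    #[doc = "Checks whether a Front Door name (subdomain) is available for use within the given subscription."]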
pub fn check_front_door_name_availability_with_subscription(
&self,
check_front_door_name_availability_input: impl Into<models::CheckNameAvailabilityInput>,
subscription_id: impl Into<String>,
) -> check_front_door_name_availability_with_subscription::Builder {
check_front_door_name_availability_with_subscription::Builder {
client: self.clone(),
check_front_door_name_availability_input: check_front_door_name_availability_input.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod check_front_door_name_availability {
use super::models;
    #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) check_front_door_name_availability_input: models::CheckNameAvailabilityInput,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CheckNameAvailabilityOutput, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/providers/Microsoft.Network/checkFrontDoorNameAvailability",
self.client.endpoint(),
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.check_front_door_name_availability_input).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CheckNameAvailabilityOutput =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod check_front_door_name_availability_with_subscription {
use super::models;
    #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::Client,
pub(crate) check_front_door_name_availability_input: models::CheckNameAvailabilityInput,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CheckNameAvailabilityOutput, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/checkFrontDoorNameAvailability",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.check_front_door_name_availability_input).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CheckNameAvailabilityOutput =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod front_doors {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
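        #[doc = "Lists all of the Front Doors within the given subscription."]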
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
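        #[doc = "Lists all of the Front Doors within the given resource group."]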
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
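        #[doc = "Gets the Front Door with the specified name within the given resource group."]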
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
}
}
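        #[doc = "Creates a new Front Door with the specified parameters, or updates an existing one. A minimal usage sketch (fenced as `ignore`; the `front_doors()` accessor and all names are hypothetical placeholders, and `parameters` is a `models::FrontDoor` you have populated):"]
        #[doc = "```ignore"]
        #[doc = "let response = client"]
        #[doc = "    .front_doors()"]
        #[doc = "    .create_or_update(subscription_id, \"my-rg\", \"my-frontdoor\", parameters)"]
        #[doc = "    .into_future()"]
        #[doc = "    .await?;"]
        #[doc = "```"]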
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
front_door_parameters: impl Into<models::FrontDoor>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
front_door_parameters: front_door_parameters.into(),
}
}
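        #[doc = "Deletes the specified Front Door. The operation may complete asynchronously (202 Accepted)."]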
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
}
}
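        #[doc = "Validates the custom domain mapping to ensure it resolves to the correct Front Door endpoint in DNS."]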
pub fn validate_custom_domain(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
custom_domain_properties: impl Into<models::ValidateCustomDomainInput>,
) -> validate_custom_domain::Builder {
validate_custom_domain::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
custom_domain_properties: custom_domain_properties.into(),
}
}
}
pub mod list {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::FrontDoorListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/frontDoors",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoorListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::FrontDoorListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoorListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::FrontDoor, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoor =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::FrontDoor),
Created201(models::FrontDoor),
Accepted202(models::FrontDoor),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) front_door_parameters: models::FrontDoor,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.front_door_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoor =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoor =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontDoor =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod validate_custom_domain {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) custom_domain_properties: models::ValidateCustomDomainInput,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ValidateCustomDomainOutput, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/validateCustomDomain",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.custom_domain_properties).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ValidateCustomDomainOutput =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod frontend_endpoints {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
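        #[doc = "Lists all of the frontend endpoints within the given Front Door."]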
pub fn list_by_front_door(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
) -> list_by_front_door::Builder {
list_by_front_door::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
}
}
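        #[doc = "Gets the frontend endpoint with the specified name within the given Front Door."]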
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
frontend_endpoint_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
frontend_endpoint_name: frontend_endpoint_name.into(),
}
}
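        #[doc = "Enables HTTPS traffic on the specified frontend endpoint, using the given custom HTTPS configuration. The operation may complete asynchronously (202 Accepted)."]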
pub fn enable_https(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
frontend_endpoint_name: impl Into<String>,
custom_https_configuration: impl Into<models::CustomHttpsConfiguration>,
) -> enable_https::Builder {
enable_https::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
frontend_endpoint_name: frontend_endpoint_name.into(),
custom_https_configuration: custom_https_configuration.into(),
}
}
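        #[doc = "Disables HTTPS traffic on the specified frontend endpoint. The operation may complete asynchronously (202 Accepted)."]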
pub fn disable_https(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
frontend_endpoint_name: impl Into<String>,
) -> disable_https::Builder {
disable_https::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
frontend_endpoint_name: frontend_endpoint_name.into(),
}
}
}
pub mod list_by_front_door {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::FrontendEndpointsListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontendEndpointsListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) frontend_endpoint_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::FrontendEndpoint, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.frontend_endpoint_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::FrontendEndpoint =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod enable_https {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) frontend_endpoint_name: String,
pub(crate) custom_https_configuration: models::CustomHttpsConfiguration,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}/enableHttps",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.frontend_endpoint_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.custom_https_configuration).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod disable_https {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) frontend_endpoint_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/frontendEndpoints/{}/disableHttps",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.frontend_endpoint_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod endpoints {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn purge_content(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
content_file_paths: impl Into<models::PurgeParameters>,
) -> purge_content::Builder {
purge_content::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
content_file_paths: content_file_paths.into(),
}
}
}
pub mod purge_content {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) content_file_paths: models::PurgeParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/purge",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.content_file_paths).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod rules_engines {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list_by_front_door(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
) -> list_by_front_door::Builder {
list_by_front_door::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
}
}
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
rules_engine_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
rules_engine_name: rules_engine_name.into(),
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
rules_engine_name: impl Into<String>,
rules_engine_parameters: impl Into<models::RulesEngine>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
rules_engine_name: rules_engine_name.into(),
rules_engine_parameters: rules_engine_parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
front_door_name: impl Into<String>,
rules_engine_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
front_door_name: front_door_name.into(),
rules_engine_name: rules_engine_name.into(),
}
}
}
pub mod list_by_front_door {
use super::models;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RulesEngineListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RulesEngineListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) rules_engine_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RulesEngine, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.rules_engine_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RulesEngine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::RulesEngine),
Created201(models::RulesEngine),
Accepted202(models::RulesEngine),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) rules_engine_name: String,
pub(crate) rules_engine_parameters: models::RulesEngine,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.rules_engine_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.rules_engine_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RulesEngine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RulesEngine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::RulesEngine =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) front_door_name: String,
pub(crate) rules_engine_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoors/{}/rulesEngines/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.front_door_name,
&self.rules_engine_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod policies {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
policy_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
policy_name: policy_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
policy_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::WebApplicationFirewallPolicy>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
policy_name: policy_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
policy_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
policy_name: policy_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::WebApplicationFirewallPolicyList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/frontDoorWebApplicationFirewallPolicies",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WebApplicationFirewallPolicyList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) policy_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::WebApplicationFirewallPolicy, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WebApplicationFirewallPolicy =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::WebApplicationFirewallPolicy),
Created201(models::WebApplicationFirewallPolicy),
Accepted202(models::WebApplicationFirewallPolicy),
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) policy_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::WebApplicationFirewallPolicy,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WebApplicationFirewallPolicy =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WebApplicationFirewallPolicy =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::WebApplicationFirewallPolicy =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) policy_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
Err(Error::UnexpectedResponse {
status_code,
body: rsp_body,
})
}
}
})
}
}
}
}
pub mod managed_rule_sets {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ManagedRuleSetDefinitionList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallManagedRuleSets",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-04-01");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ManagedRuleSetDefinitionList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
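// Hand-written usage sketch (not part of the generated output; constructing a
// configured `Client` with credentials and endpoint is assumed to happen
// elsewhere). Every operation above follows the same shape: a method on the
// operation-group client returns a `Builder`, which is driven to completion
// with `into_future().await`.
#[allow(dead_code)]
async fn list_waf_policies_sketch(
    client: &policies::Client,
    resource_group_name: &str,
    subscription_id: &str,
) -> Result<(), policies::list::Error> {
    let _policies = client
        .list(resource_group_name, subscription_id)
        .into_future()
        .await?;
    Ok(())
}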
// Generated by `scripts/generate.js`
use std::os::raw::c_char;
use std::ops::Deref;
use std::ptr;
use std::cmp;
use std::mem;
use utils::c_bindings::*;
use utils::vk_convert::*;
use utils::vk_null::*;
use utils::vk_ptr::*;
use utils::vk_traits::*;
use vulkan::vk::*;
use vulkan::vk::{VkStructureType,RawVkStructureType};
use vulkan::vk::{VkImageLayout,RawVkImageLayout};
/// Wrapper for [VkAttachmentReferenceStencilLayout](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAttachmentReferenceStencilLayout.html).
#[derive(Debug, Clone)]
pub struct VkAttachmentReferenceStencilLayout {
pub stencil_layout: VkImageLayout,
}
#[doc(hidden)]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct RawVkAttachmentReferenceStencilLayout {
pub s_type: RawVkStructureType,
pub next: *mut c_void,
pub stencil_layout: RawVkImageLayout,
}
impl VkWrappedType<RawVkAttachmentReferenceStencilLayout> for VkAttachmentReferenceStencilLayout {
fn vk_to_raw(src: &VkAttachmentReferenceStencilLayout, dst: &mut RawVkAttachmentReferenceStencilLayout) {
dst.s_type = vk_to_raw_value(&VkStructureType::AttachmentReferenceStencilLayout);
dst.next = ptr::null_mut();
dst.stencil_layout = vk_to_raw_value(&src.stencil_layout);
}
}
impl VkRawType<VkAttachmentReferenceStencilLayout> for RawVkAttachmentReferenceStencilLayout {
fn vk_to_wrapped(src: &RawVkAttachmentReferenceStencilLayout) -> VkAttachmentReferenceStencilLayout {
VkAttachmentReferenceStencilLayout {
stencil_layout: RawVkImageLayout::vk_to_wrapped(&src.stencil_layout),
}
}
}
impl Default for VkAttachmentReferenceStencilLayout {
fn default() -> VkAttachmentReferenceStencilLayout {
VkAttachmentReferenceStencilLayout {
stencil_layout: Default::default(),
}
}
}
impl VkSetup for VkAttachmentReferenceStencilLayout {
fn vk_setup(&mut self, fn_table: *mut VkFunctionTable) {
}
}
impl VkFree for RawVkAttachmentReferenceStencilLayout {
fn vk_free(&self) {
}
}
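// Illustrative conversion sketch (test-only, not generated; zero-initialized
// raw storage is assumed to be a valid stand-in for memory handed over by a
// Vulkan driver). `vk_to_raw` fills in the structure-type tag and leaves the
// `next` chain pointer null.
#[cfg(test)]
mod conversion_sketch {
    use super::*;

    #[test]
    fn wrapped_to_raw_sets_header_fields() {
        let wrapped = VkAttachmentReferenceStencilLayout::default();
        let mut raw: RawVkAttachmentReferenceStencilLayout = unsafe { mem::zeroed() };
        VkAttachmentReferenceStencilLayout::vk_to_raw(&wrapped, &mut raw);
        assert!(raw.next.is_null());
    }
}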
// Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
//! Working with "string-able" types.
use std::{
fmt::{Debug, Display},
hash::Hash,
str::FromStr,
};
/// Represents a "string-able" type.
///
/// The type is required to be able to be represented as a string [`Display`], along with knowing
/// how to be parsed from the string representation [`FromStr`]. To make sure things stay easy to
/// debug, both the [`Tag`] and the [`FromStr::Err`] must implement [`Debug`].
///
/// [`Clone`], [`Hash`], and [`Eq`] are needed so that it can represent un-hashable types.
///
/// [`Send`] and [`Sync`] and `'static` are current requirements due to how it is sometimes sent
/// across thread boundaries, although some of those constraints may relax in the future.
///
/// The simplest type that fits all these requirements is a [`String`](std::string::String).
///
/// # Handling Errors
///
/// Because we leave it up to the type to implement [`FromStr`], if an error is returned during
/// parsing then Tauri will [`std::panic!`] with the string it failed to parse.
///
/// To avoid Tauri panicking during the application runtime, have your type be able to handle
/// unknown events and never return an error in [`FromStr`]. Then it will be up to your own code
/// to handle the unknown event.
///
/// # Example
///
/// ```
/// use std::fmt;
/// use std::str::FromStr;
///
/// #[derive(Debug, Clone, Hash, Eq, PartialEq)]
/// enum Event {
/// Foo,
/// Bar,
/// Unknown(String),
/// }
///
/// impl fmt::Display for Event {
/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// f.write_str(match self {
/// Self::Foo => "foo",
/// Self::Bar => "bar",
/// Self::Unknown(s) => &s
/// })
/// }
/// }
///
/// impl FromStr for Event {
/// type Err = std::convert::Infallible;
///
/// fn from_str(s: &str) -> Result<Self, Self::Err> {
/// Ok(match s {
/// "foo" => Self::Foo,
/// "bar" => Self::Bar,
/// other => Self::Unknown(other.to_string())
/// })
/// }
/// }
///
/// // safe to unwrap because we know it's infallible due to our FromStr implementation.
/// let event: Event = "tauri://file-drop".parse().unwrap();
///
/// // show that this event type can be represented as a Tag, a requirement for using it in Tauri.
/// fn is_file_drop(tag: impl tauri::runtime::tag::Tag) {
/// assert_eq!("tauri://file-drop", tag.to_string());
/// }
///
/// is_file_drop(event);
/// ```
pub trait Tag: Hash + Eq + FromStr + Display + Debug + Clone + Send + Sync + 'static {}
/// Automatically implement [`Tag`] for all types that fit the requirements.
impl<T, E: Debug> Tag for T where
T: Hash + Eq + FromStr<Err = E> + Display + Debug + Clone + Send + Sync + 'static
{
}
/// A reference to a [`Tag`].
///
/// * [`Display`] so that we can still convert this tag to a JavaScript string.
/// * [`ToOwned`] to make sure we can clone it into the owned tag in specific cases.
/// * [`PartialEq`] so that we can compare refs to the owned tags easily.
/// * [`Hash`] + [`Eq`] because we want to be able to use a ref as a key to internal hashmaps.
pub trait TagRef<T: Tag>: Display + ToOwned<Owned = T> + PartialEq<T> + Eq + Hash
where
T: std::borrow::Borrow<Self>,
{
}
/// Automatically implement [`TagRef`] for all types that fit the requirements.
impl<T: Tag, R> TagRef<T> for R
where
T: std::borrow::Borrow<R>,
R: Display + ToOwned<Owned = T> + PartialEq<T> + Eq + Hash + ?Sized,
{
}
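// Minimal sketch (test-only, not part of the original file): `String`
// satisfies `Tag`, and `str` is then usable as its `TagRef`, so lookups can
// borrow instead of allocating an owned tag.
#[cfg(test)]
mod tag_ref_sketch {
    use super::{Tag, TagRef};

    // Accepts any borrowed form of a tag, mirroring how internal maps can be
    // queried by reference.
    fn assert_tag_ref<T: Tag, R: TagRef<T> + ?Sized>(tag: &T, tag_ref: &R)
    where
        T: std::borrow::Borrow<R>,
    {
        // `TagRef` requires `PartialEq<T>`, so the borrowed form compares
        // directly against the owned tag.
        assert!(tag_ref == tag);
    }

    #[test]
    fn str_is_a_tag_ref_for_string() {
        let owned: String = "tauri://file-drop".into();
        assert_tag_ref(&owned, "tauri://file-drop");
    }
}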
/// Private helper to turn [`Tag`] related things into JavaScript, safely.
///
/// The main concern is string escaping, so we rely on [`serde_json`] to handle all serialization
/// of the [`Tag`] as a string. We do this instead of requiring [`serde::Serialize`] on [`Tag`]
/// because it really represents a string, not any serializable data structure.
///
/// We don't want downstream users to implement this trait so that [`Tag`]s cannot be turned into
/// invalid JavaScript - regardless of their content.
pub(crate) trait ToJsString {
fn to_js_string(&self) -> crate::Result<String>;
}
impl<D: Display> ToJsString for D {
/// Turn any [`Tag`] into the JavaScript representation of a string.
fn to_js_string(&self) -> crate::Result<String> {
Ok(serde_json::to_string(&self.to_string())?)
}
}
/// Turn any collection of [`Tag`]s into a JavaScript array of strings.
pub(crate) fn tags_to_javascript_array(tags: &[impl Tag]) -> crate::Result<String> {
let tags: Vec<String> = tags.iter().map(ToString::to_string).collect();
Ok(serde_json::to_string(&tags)?)
}
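// Sketch (test-only, not in the original file): `serde_json` performs the
// escaping, so quotes inside a tag cannot break out of the generated
// JavaScript string literal, and a slice of tags serializes to a JS array
// literal.
#[cfg(test)]
mod to_js_sketch {
    use super::{tags_to_javascript_array, ToJsString};

    #[test]
    fn escapes_strings_and_renders_arrays() {
        assert_eq!("say \"hi\"".to_js_string().unwrap(), r#""say \"hi\"""#);
        let tags = vec!["foo".to_string(), "bar".to_string()];
        assert_eq!(tags_to_javascript_array(&tags).unwrap(), r#"["foo","bar"]"#);
    }
}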
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use crate::packet_hasher::PacketHasher;
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
use lru::LruCache;
use retain_mut::RetainMut;
use safecoin_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo};
use solana_ledger::{blockstore_processor::TransactionStatusSender, entry::hash_transactions};
use safecoin_measure::measure::Measure;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::{
cuda_runtime::PinnedVec,
data_budget::DataBudget,
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
perf_libs,
};
use solana_poh::poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder};
use solana_runtime::{
accounts_db::ErrorCounters,
bank::{
Bank, ExecuteTimings, TransactionBalancesSet, TransactionCheckResult,
TransactionExecutionResult,
},
bank_utils,
hashed_transaction::HashedTransaction,
transaction_batch::TransactionBatch,
vote_sender_types::ReplayVoteSender,
};
use safecoin_sdk::{
clock::{
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
},
message::Message,
pubkey::Pubkey,
short_vec::decode_shortu16_len,
signature::Signature,
timing::{duration_as_ms, timestamp, AtomicInterval},
transaction::{self, Transaction, TransactionError},
};
use safecoin_transaction_status::token_balances::{
collect_token_balances, TransactionTokenBalancesSet,
};
use std::{
borrow::Cow,
cmp,
collections::{HashMap, VecDeque},
env,
mem::size_of,
net::{SocketAddr, UdpSocket},
ops::DerefMut,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Mutex},
thread::{self, Builder, JoinHandle},
time::Duration,
time::Instant,
};
/// (packets, valid_indexes, forwarded)
/// A batch of packets, the indexes within it that are still valid, and whether this batch has already been forwarded.
type PacketsAndOffsets = (Packets, Vec<usize>, bool);
pub type UnprocessedPackets = VecDeque<PacketsAndOffsets>;
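// Minimal sketch (test-only, not in the original file) of the tuple shape
// above: a packet batch, the indexes in it still considered valid, and the
// forwarded flag. Batches already marked as forwarded are skipped when
// filtering packets for forwarding.
#[cfg(test)]
mod unprocessed_packets_sketch {
    use super::*;

    #[test]
    fn forwarded_batches_yield_no_packets() {
        let batch: PacketsAndOffsets = (Packets::new(vec![Packet::default()]), vec![0], true);
        let unprocessed: UnprocessedPackets = vec![batch].into();
        let to_forward = BankingStage::filter_valid_packets_for_forwarding(unprocessed.iter());
        assert!(to_forward.is_empty());
    }
}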
/// Transaction forwarding
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2;
pub const HOLD_TRANSACTIONS_SLOT_OFFSET: u64 = 20;
// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 4;
const TOTAL_BUFFERED_PACKETS: usize = 500_000;
const MAX_NUM_TRANSACTIONS_PER_BATCH: usize = 128;
const DEFAULT_LRU_SIZE: usize = 200_000;
const NUM_VOTE_PROCESSING_THREADS: u32 = 2;
const MIN_THREADS_BANKING: u32 = 1;
#[derive(Debug, Default)]
pub struct BankingStageStats {
last_report: AtomicInterval,
id: u32,
process_packets_count: AtomicUsize,
new_tx_count: AtomicUsize,
dropped_packet_batches_count: AtomicUsize,
dropped_packets_count: AtomicUsize,
dropped_duplicated_packets_count: AtomicUsize,
newly_buffered_packets_count: AtomicUsize,
current_buffered_packets_count: AtomicUsize,
current_buffered_packet_batches_count: AtomicUsize,
rebuffered_packets_count: AtomicUsize,
consumed_buffered_packets_count: AtomicUsize,
// Timing
consume_buffered_packets_elapsed: AtomicU64,
process_packets_elapsed: AtomicU64,
handle_retryable_packets_elapsed: AtomicU64,
filter_pending_packets_elapsed: AtomicU64,
packet_duplicate_check_elapsed: AtomicU64,
packet_conversion_elapsed: AtomicU64,
transaction_processing_elapsed: AtomicU64,
}
impl BankingStageStats {
pub fn new(id: u32) -> Self {
BankingStageStats {
id,
..BankingStageStats::default()
}
}
fn report(&self, report_interval_ms: u64) {
if self.last_report.should_update(report_interval_ms) {
datapoint_info!(
"banking_stage-loop-stats",
("id", self.id as i64, i64),
(
"process_packets_count",
self.process_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"new_tx_count",
self.new_tx_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_packet_batches_count",
self.dropped_packet_batches_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_packets_count",
self.dropped_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_duplicated_packets_count",
self.dropped_duplicated_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"newly_buffered_packets_count",
self.newly_buffered_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"current_buffered_packet_batches_count",
self.current_buffered_packet_batches_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"current_buffered_packets_count",
self.current_buffered_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rebuffered_packets_count",
self.rebuffered_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"consumed_buffered_packets_count",
self.consumed_buffered_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"consume_buffered_packets_elapsed",
self.consume_buffered_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"process_packets_elapsed",
self.process_packets_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_retryable_packets_elapsed",
self.handle_retryable_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"filter_pending_packets_elapsed",
self.filter_pending_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"packet_duplicate_check_elapsed",
self.packet_duplicate_check_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"packet_conversion_elapsed",
self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"transaction_processing_elapsed",
self.transaction_processing_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
/// Stores the join handles of the banking stage's worker threads.
pub struct BankingStage {
bank_thread_hdls: Vec<JoinHandle<()>>,
}
#[derive(Debug, Clone)]
pub enum BufferedPacketsDecision {
Consume(u128),
Forward,
ForwardAndHold,
Hold,
}
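// Illustrative sketch (test-only, not in the original source) of the decision
// table implemented by `consume_or_forward_packets` below: a known leader that
// is another node, with no bank in hand, means the buffered packets should be
// forwarded.
#[cfg(test)]
mod buffered_packets_decision_sketch {
    use super::*;

    #[test]
    fn forwards_when_another_node_is_leader() {
        let my_pubkey = Pubkey::new_unique();
        let leader = Pubkey::new_unique();
        let decision = BankingStage::consume_or_forward_packets(
            &my_pubkey,
            Some(leader),
            None,  // no bank: this node is not currently producing a block
            false, // not leader within ~20 slots
            false, // not leader shortly
        );
        assert!(matches!(decision, BufferedPacketsDecision::Forward));
    }
}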
#[derive(Debug, Clone)]
pub enum ForwardOption {
NotForward,
ForwardTpuVote,
ForwardTransaction,
}
impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
#[allow(clippy::new_ret_no_self)]
pub fn new(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
Self::new_num_threads(
cluster_info,
poh_recorder,
verified_receiver,
tpu_verified_vote_receiver,
verified_vote_receiver,
Self::num_threads(),
transaction_status_sender,
gossip_vote_sender,
)
}
fn new_num_threads(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
num_threads: u32,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
// Single thread to generate entries from many banks.
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its blockhash is registered with the bank.
let my_pubkey = cluster_info.id();
let duplicates = Arc::new(Mutex::new((
LruCache::new(DEFAULT_LRU_SIZE),
PacketHasher::default(),
)));
let data_budget = Arc::new(DataBudget::default());
// Many banks that process transactions in parallel.
assert!(num_threads >= NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING);
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
.map(|i| {
let (verified_receiver, forward_option) = match i {
0 => {
// Disable forwarding of vote transactions
// from gossip. Note - votes can also arrive from tpu
(verified_vote_receiver.clone(), ForwardOption::NotForward)
}
1 => (
tpu_verified_vote_receiver.clone(),
ForwardOption::ForwardTpuVote,
),
_ => (verified_receiver.clone(), ForwardOption::ForwardTransaction),
};
let poh_recorder = poh_recorder.clone();
let cluster_info = cluster_info.clone();
let mut recv_start = Instant::now();
let transaction_status_sender = transaction_status_sender.clone();
let gossip_vote_sender = gossip_vote_sender.clone();
let duplicates = duplicates.clone();
let data_budget = data_budget.clone();
Builder::new()
.name("solana-banking-stage-tx".to_string())
.spawn(move || {
Self::process_loop(
my_pubkey,
&verified_receiver,
&poh_recorder,
&cluster_info,
&mut recv_start,
forward_option,
i,
batch_limit,
transaction_status_sender,
gossip_vote_sender,
&duplicates,
&data_budget,
);
})
.unwrap()
})
.collect();
Self { bank_thread_hdls }
}
fn filter_valid_packets_for_forwarding<'a>(
all_packets: impl Iterator<Item = &'a PacketsAndOffsets>,
) -> Vec<&'a Packet> {
all_packets
.filter(|(_p, _indexes, forwarded)| !forwarded)
.flat_map(|(p, valid_indexes, _forwarded)| {
valid_indexes.iter().map(move |x| &p.packets[*x])
})
.collect()
}
fn forward_buffered_packets(
socket: &std::net::UdpSocket,
tpu_forwards: &std::net::SocketAddr,
unprocessed_packets: &UnprocessedPackets,
data_budget: &DataBudget,
) -> std::io::Result<()> {
let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets.iter());
inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
const INTERVAL_MS: u64 = 100;
const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200;
const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;
const MAX_BYTES_BUDGET: usize = MAX_BYTES_PER_INTERVAL * 5;
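// Worked example of the budget above: 10_000 packets/s * 1200 bytes = 12 MB/s,
// so each 100 ms interval grants 1.2 MB of forwarding allowance, and unused
// allowance may accumulate up to five intervals (6 MB) before being capped.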
data_budget.update(INTERVAL_MS, |bytes| {
std::cmp::min(bytes + MAX_BYTES_PER_INTERVAL, MAX_BYTES_BUDGET)
});
for p in packets {
if data_budget.take(p.meta.size) {
socket.send_to(&p.data[..p.meta.size], &tpu_forwards)?;
}
}
Ok(())
}
// Returns whether the given `Packets` batch still contains unprocessed
// transactions.
fn update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes: &mut Vec<usize>,
new_unprocessed_indexes: Vec<usize>,
) -> bool {
let has_more_unprocessed_transactions =
Self::packet_has_more_unprocessed_transactions(&new_unprocessed_indexes);
if has_more_unprocessed_transactions {
*original_unprocessed_indexes = new_unprocessed_indexes
};
has_more_unprocessed_transactions
}
pub fn consume_buffered_packets(
my_pubkey: &Pubkey,
max_tx_ingestion_ns: u128,
poh_recorder: &Arc<Mutex<PohRecorder>>,
buffered_packets: &mut UnprocessedPackets,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
test_fn: Option<impl Fn()>,
banking_stage_stats: &BankingStageStats,
recorder: &TransactionRecorder,
) {
let mut rebuffered_packets_len = 0;
let mut new_tx_count = 0;
let buffered_len = buffered_packets.len();
let mut proc_start = Measure::start("consume_buffered_process");
let mut reached_end_of_slot = None;
buffered_packets.retain_mut(|(msgs, ref mut original_unprocessed_indexes, _forwarded)| {
if let Some((next_leader, bank)) = &reached_end_of_slot {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
bank,
msgs,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
);
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
)
} else {
let bank_start = poh_recorder.lock().unwrap().bank_start();
if let Some((bank, bank_creation_time)) = bank_start {
let (processed, verified_txs_len, new_unprocessed_indexes) =
Self::process_packets_transactions(
&bank,
&bank_creation_time,
recorder,
msgs,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
);
if processed < verified_txs_len
|| !Bank::should_bank_still_be_processing_txs(
&bank_creation_time,
max_tx_ingestion_ns,
)
{
reached_end_of_slot =
Some((poh_recorder.lock().unwrap().next_slot_leader(), bank));
}
new_tx_count += processed;
// Out of the buffered packets just retried, collect any still unprocessed
// transactions in this batch for forwarding
rebuffered_packets_len += new_unprocessed_indexes.len();
let has_more_unprocessed_transactions =
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
);
if let Some(test_fn) = &test_fn {
test_fn();
}
has_more_unprocessed_transactions
} else {
rebuffered_packets_len += original_unprocessed_indexes.len();
// If this batch has not been processed yet, `original_unprocessed_indexes`
// must still contain packets to process.
assert!(Self::packet_has_more_unprocessed_transactions(
original_unprocessed_indexes
));
true
}
}
});
proc_start.stop();
debug!(
"@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}",
timestamp(),
buffered_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s())
);
banking_stage_stats
.consume_buffered_packets_elapsed
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.rebuffered_packets_count
.fetch_add(rebuffered_packets_len, Ordering::Relaxed);
banking_stage_stats
.consumed_buffered_packets_count
.fetch_add(new_tx_count, Ordering::Relaxed);
}
fn consume_or_forward_packets(
my_pubkey: &Pubkey,
leader_pubkey: Option<Pubkey>,
bank_still_processing_txs: Option<&Arc<Bank>>,
would_be_leader: bool,
would_be_leader_shortly: bool,
) -> BufferedPacketsDecision {
leader_pubkey.map_or(
// If leader is not known, return the buffered packets as is
BufferedPacketsDecision::Hold,
// else process the packets
|x| {
if let Some(bank) = bank_still_processing_txs {
// If the bank is available, this node is the leader
BufferedPacketsDecision::Consume(bank.ns_per_slot)
} else if would_be_leader_shortly {
// If the node will be the leader soon, hold the packets for now
BufferedPacketsDecision::Hold
} else if would_be_leader {
// Node will be leader within ~20 slots: forward the packets, but also
// hold on to them in case this node is the only one that ends up
// producing an accepted slot.
BufferedPacketsDecision::ForwardAndHold
} else if x != *my_pubkey {
// If the current node is not the leader, forward the buffered packets
BufferedPacketsDecision::Forward
} else {
// This node is the upcoming leader, but no bank is available yet. Hold the packets for now.
BufferedPacketsDecision::Hold
}
},
)
}
#[allow(clippy::too_many_arguments)]
fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
forward_option: &ForwardOption,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
banking_stage_stats: &BankingStageStats,
recorder: &TransactionRecorder,
data_budget: &DataBudget,
) -> BufferedPacketsDecision {
let bank_start;
let (
leader_at_slot_offset,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
) = {
let poh = poh_recorder.lock().unwrap();
bank_start = poh.bank_start();
(
poh.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET),
PohRecorder::get_bank_still_processing_txs(&bank_start),
poh.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT),
poh.would_be_leader(
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT,
),
)
};
let decision = Self::consume_or_forward_packets(
my_pubkey,
leader_at_slot_offset,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
);
match decision {
BufferedPacketsDecision::Consume(max_tx_ingestion_ns) => {
Self::consume_buffered_packets(
my_pubkey,
max_tx_ingestion_ns,
poh_recorder,
buffered_packets,
transaction_status_sender,
gossip_vote_sender,
None::<Box<dyn Fn()>>,
banking_stage_stats,
recorder,
);
}
BufferedPacketsDecision::Forward => {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
poh_recorder,
socket,
false,
data_budget,
);
}
BufferedPacketsDecision::ForwardAndHold => {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
poh_recorder,
socket,
true,
data_budget,
);
}
_ => (),
}
decision
}
fn handle_forwarding(
forward_option: &ForwardOption,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
poh_recorder: &Arc<Mutex<PohRecorder>>,
socket: &UdpSocket,
hold: bool,
data_budget: &DataBudget,
) {
let addr = match forward_option {
ForwardOption::NotForward => {
if !hold {
buffered_packets.clear();
}
return;
}
ForwardOption::ForwardTransaction => {
next_leader_tpu_forwards(cluster_info, poh_recorder)
}
ForwardOption::ForwardTpuVote => next_leader_tpu_vote(cluster_info, poh_recorder),
};
let addr = match addr {
Some(addr) => addr,
None => return,
};
let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets, data_budget);
if hold {
buffered_packets.retain(|(_, index, _)| !index.is_empty());
for (_, _, forwarded) in buffered_packets.iter_mut() {
*forwarded = true;
}
} else {
buffered_packets.clear();
}
}
#[allow(clippy::too_many_arguments)]
pub fn process_loop(
my_pubkey: Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
recv_start: &mut Instant,
forward_option: ForwardOption,
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
data_budget: &DataBudget,
) {
let recorder = poh_recorder.lock().unwrap().recorder();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = VecDeque::with_capacity(batch_limit);
let banking_stage_stats = BankingStageStats::new(id);
loop {
while !buffered_packets.is_empty() {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
cluster_info,
&mut buffered_packets,
&forward_option,
transaction_status_sender.clone(),
&gossip_vote_sender,
&banking_stage_stats,
&recorder,
data_budget,
);
if matches!(decision, BufferedPacketsDecision::Hold)
|| matches!(decision, BufferedPacketsDecision::ForwardAndHold)
{
// If we are waiting on a new bank,
// check the receiver for more transactions/for exiting
break;
}
}
let recv_timeout = if !buffered_packets.is_empty() {
// If packets are buffered, let's wait for less time on recv from the channel.
// This helps detect the next leader faster, and processing the buffered
// packets quickly
Duration::from_millis(10)
} else {
// Default wait time
Duration::from_millis(100)
};
match Self::process_packets(
&my_pubkey,
verified_receiver,
poh_recorder,
recv_start,
recv_timeout,
id,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
&mut buffered_packets,
&banking_stage_stats,
duplicates,
&recorder,
) {
Ok(()) | Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
}
banking_stage_stats.report(1000);
}
}
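    // For example, with `SAFECOIN_BANKING_THREADS=1` the result is still clamped up to
    // `NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING`, so vote processing always
    // retains its dedicated threads.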
pub fn num_threads() -> u32 {
cmp::max(
env::var("SAFECOIN_BANKING_THREADS")
.map(|x| x.parse().unwrap_or(NUM_THREADS))
.unwrap_or(NUM_THREADS),
NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING,
)
}
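    // Pairs each execution result with its transaction, keeps only committable ones, and
    // records them in PoH; on `MaxHeightReached` the committable indexes are handed back
    // as retryable.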
#[allow(clippy::match_wild_err_arm)]
fn record_transactions<'a>(
bank_slot: Slot,
txs: impl Iterator<Item = &'a Transaction>,
results: &[TransactionExecutionResult],
recorder: &TransactionRecorder,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut processed_generation = Measure::start("record::process_generation");
let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results
.iter()
.zip(txs)
.enumerate()
.filter_map(|(i, ((r, _n), x))| {
if Bank::can_commit(r) {
Some((x.clone(), i))
} else {
None
}
})
.unzip();
processed_generation.stop();
let num_to_commit = processed_transactions.len();
debug!("num_to_commit: {} ", num_to_commit);
        // Transactions with errors were filtered out by the `filter_map` above; only
        // committable transactions are recorded
if !processed_transactions.is_empty() {
inc_new_counter_info!("banking_stage-record_count", 1);
inc_new_counter_info!("banking_stage-record_transactions", num_to_commit);
let mut hash_time = Measure::start("record::hash");
let hash = hash_transactions(&processed_transactions[..]);
hash_time.stop();
let mut poh_record = Measure::start("record::poh_record");
// record and unlock will unlock all the successful transactions
let res = recorder.record(bank_slot, hash, processed_transactions);
match res {
Ok(()) => (),
Err(PohRecorderError::MaxHeightReached) => {
inc_new_counter_info!("banking_stage-max_height_reached", 1);
inc_new_counter_info!(
"banking_stage-max_height_reached_num_to_commit",
num_to_commit
);
// If record errors, add all the committable transactions (the ones
// we just attempted to record) as retryable
return (
Err(PohRecorderError::MaxHeightReached),
processed_transactions_indexes,
);
}
Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e),
}
poh_record.stop();
}
(Ok(num_to_commit), vec![])
}
fn process_and_record_transactions_locked(
bank: &Arc<Bank>,
poh: &TransactionRecorder,
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut load_execute_time = Measure::start("load_execute_time");
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
// the likelihood of any single thread getting starved and processing old ids.
        // TODO: Banking stage threads should be prioritized to complete faster than this queue
// expires.
let pre_balances = if transaction_status_sender.is_some() {
bank.collect_balances(batch)
} else {
vec![]
};
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
let pre_token_balances = if transaction_status_sender.is_some() {
collect_token_balances(bank, batch, &mut mint_decimals)
} else {
vec![]
};
let mut execute_timings = ExecuteTimings::default();
let (
mut loaded_accounts,
results,
inner_instructions,
transaction_logs,
mut retryable_txs,
tx_count,
signature_count,
) = bank.load_and_execute_transactions(
batch,
MAX_PROCESSING_AGE,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
&mut execute_timings,
);
load_execute_time.stop();
let freeze_lock = bank.freeze_lock();
let mut record_time = Measure::start("record_time");
let (num_to_commit, retryable_record_txs) =
Self::record_transactions(bank.slot(), batch.transactions_iter(), &results, poh);
inc_new_counter_info!(
"banking_stage-record_transactions_num_to_commit",
*num_to_commit.as_ref().unwrap_or(&0)
);
inc_new_counter_info!(
"banking_stage-record_transactions_retryable_record_txs",
retryable_record_txs.len()
);
retryable_txs.extend(retryable_record_txs);
if num_to_commit.is_err() {
return (num_to_commit, retryable_txs);
}
record_time.stop();
let mut commit_time = Measure::start("commit_time");
let hashed_txs = batch.hashed_transactions();
let num_to_commit = num_to_commit.unwrap();
if num_to_commit != 0 {
let tx_results = bank.commit_transactions(
hashed_txs,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
&mut execute_timings,
);
bank_utils::find_and_send_votes(hashed_txs, &tx_results, Some(gossip_vote_sender));
if let Some(transaction_status_sender) = transaction_status_sender {
let txs = batch.transactions_iter().cloned().collect();
let post_balances = bank.collect_balances(batch);
let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals);
transaction_status_sender.send_transaction_status_batch(
bank.clone(),
txs,
tx_results.execution_results,
TransactionBalancesSet::new(pre_balances, post_balances),
TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances),
inner_instructions,
transaction_logs,
tx_results.rent_debits,
);
}
}
commit_time.stop();
drop(freeze_lock);
debug!(
"bank: {} process_and_record_locked: {}us record: {}us commit: {}us txs_len: {}",
bank.slot(),
load_execute_time.as_us(),
record_time.as_us(),
commit_time.as_us(),
hashed_txs.len(),
);
debug!(
"process_and_record_transactions_locked: {:?}",
execute_timings
);
(Ok(num_to_commit), retryable_txs)
}
pub fn process_and_record_transactions(
bank: &Arc<Bank>,
txs: &[HashedTransaction],
poh: &TransactionRecorder,
chunk_offset: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut lock_time = Measure::start("lock_time");
// Once accounts are locked, other threads cannot encode transactions that will modify the
// same account state
let batch = bank.prepare_hashed_batch(txs);
lock_time.stop();
let (result, mut retryable_txs) = Self::process_and_record_transactions_locked(
bank,
poh,
&batch,
transaction_status_sender,
gossip_vote_sender,
);
retryable_txs.iter_mut().for_each(|x| *x += chunk_offset);
let mut unlock_time = Measure::start("unlock_time");
        // Once the accounts are unlocked, new transactions can enter the pipeline to be processed
drop(batch);
unlock_time.stop();
debug!(
"bank: {} lock: {}us unlock: {}us txs_len: {}",
bank.slot(),
lock_time.as_us(),
unlock_time.as_us(),
txs.len(),
);
(result, retryable_txs)
}
/// Sends transactions to the bank.
///
/// Returns the number of transactions successfully processed by the bank, which may be less
/// than the total number if max PoH height was reached and the bank halted
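    ///
    /// As an illustrative example (assuming `MAX_NUM_TRANSACTIONS_PER_BATCH` were 128):
    /// 300 transactions would be processed as chunks of 128, 128, and 44; if PoH max
    /// height were reached during the second chunk, indexes `256..300` would be returned
    /// as unprocessed, along with any retryable indexes from the chunks already attempted.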
fn process_transactions(
bank: &Arc<Bank>,
bank_creation_time: &Instant,
transactions: &[HashedTransaction],
poh: &TransactionRecorder,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, Vec<usize>) {
let mut chunk_start = 0;
let mut unprocessed_txs = vec![];
while chunk_start != transactions.len() {
let chunk_end = std::cmp::min(
transactions.len(),
chunk_start + MAX_NUM_TRANSACTIONS_PER_BATCH,
);
let (result, retryable_txs_in_chunk) = Self::process_and_record_transactions(
bank,
&transactions[chunk_start..chunk_end],
poh,
chunk_start,
transaction_status_sender.clone(),
gossip_vote_sender,
);
trace!("process_transactions result: {:?}", result);
// Add the retryable txs (transactions that errored in a way that warrants a retry)
// to the list of unprocessed txs.
unprocessed_txs.extend_from_slice(&retryable_txs_in_chunk);
            // Before processing the next chunk, check whether the bank has exhausted
            // the processing time allotted to its slot
let should_bank_still_be_processing_txs =
Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot);
match (result, should_bank_still_be_processing_txs) {
(Err(PohRecorderError::MaxHeightReached), _) | (_, false) => {
info!(
"process transactions: max height reached slot: {} height: {}",
bank.slot(),
bank.tick_height()
);
// process_and_record_transactions has returned all retryable errors in
// transactions[chunk_start..chunk_end], so we just need to push the remaining
// transactions into the unprocessed queue.
unprocessed_txs.extend(chunk_end..transactions.len());
break;
}
_ => (),
}
// Don't exit early on any other type of error, continue processing...
chunk_start = chunk_end;
}
(chunk_start, unprocessed_txs)
}
    // This function creates a filter of transaction results: `Ok(())` for every pending
    // transaction and `Err(TransactionError::BlockhashNotFound)` for the rest
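    // For example, `prepare_filter_for_pending_transactions(4, &[1, 3])` returns
    // `[Err(BlockhashNotFound), Ok(()), Err(BlockhashNotFound), Ok(())]`.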
fn prepare_filter_for_pending_transactions(
transactions_len: usize,
pending_tx_indexes: &[usize],
) -> Vec<transaction::Result<()>> {
let mut mask = vec![Err(TransactionError::BlockhashNotFound); transactions_len];
pending_tx_indexes.iter().for_each(|x| mask[*x] = Ok(()));
mask
}
    // This function returns a vector containing the packet index of every valid
    // transaction, i.e. every transaction whose check result is `Ok(())`
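    // For example, with check results `[Ok, Err, Ok]` and
    // `transaction_indexes = [7, 8, 9]`, the result is `[7, 9]`.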
fn filter_valid_transaction_indexes(
valid_txs: &[TransactionCheckResult],
transaction_indexes: &[usize],
) -> Vec<usize> {
let valid_transactions = valid_txs
.iter()
.enumerate()
.filter_map(|(index, (x, _h))| if x.is_ok() { Some(index) } else { None })
.collect_vec();
valid_transactions
.iter()
.map(|x| transaction_indexes[*x])
.collect()
}
/// Read the transaction message from packet data
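    ///
    /// A serialized transaction starts with a shortvec-encoded signature count followed
    /// by the signatures and then the message bytes, so the message begins at
    /// `sig_size + sig_len * size_of::<Signature>()`. For example, with one 64-byte
    /// signature and a 1-byte shortvec length, the message starts at byte `1 + 64 = 65`.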
fn packet_message(packet: &Packet) -> Option<&[u8]> {
let (sig_len, sig_size) = decode_shortu16_len(&packet.data).ok()?;
let msg_start = sig_len
.checked_mul(size_of::<Signature>())
.and_then(|v| v.checked_add(sig_size))?;
let msg_end = packet.meta.size;
Some(&packet.data[msg_start..msg_end])
}
    // This function deserializes packets into transactions, computes the blake3 hash of
    // each transaction message, and verifies secp256k1 instructions. A list of valid
    // transactions is returned with their message hashes and packet indexes.
fn transactions_from_packets(
msgs: &Packets,
transaction_indexes: &[usize],
libsecp256k1_0_5_upgrade_enabled: bool,
votes_only: bool,
) -> (Vec<HashedTransaction<'static>>, Vec<usize>) {
transaction_indexes
.iter()
.filter_map(|tx_index| {
let p = &msgs.packets[*tx_index];
if votes_only && !p.meta.is_simple_vote_tx {
return None;
}
let tx: Transaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?;
tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled)
.ok()?;
let message_bytes = Self::packet_message(p)?;
let message_hash = Message::hash_raw_message(message_bytes);
Some((
HashedTransaction::new(Cow::Owned(tx), message_hash),
tx_index,
))
})
.unzip()
}
/// This function filters pending packets that are still valid
/// # Arguments
/// * `transactions` - a batch of transactions deserialized from packets
/// * `transaction_to_packet_indexes` - maps each transaction to a packet index
/// * `pending_indexes` - identifies which indexes in the `transactions` list are still pending
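    ///
    /// As a worked example with illustrative constants (`MAX_PROCESSING_AGE` of 150, a
    /// forwarding delay of 6, and a leader slot offset of 2), transactions are checked
    /// against a maximum age of `150 - 6 - 2 = 142`; anything older is dropped rather
    /// than kept for forwarding.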
fn filter_pending_packets_from_pending_txs(
bank: &Arc<Bank>,
transactions: &[HashedTransaction],
transaction_to_packet_indexes: &[usize],
pending_indexes: &[usize],
) -> Vec<usize> {
let filter =
Self::prepare_filter_for_pending_transactions(transactions.len(), pending_indexes);
let mut error_counters = ErrorCounters::default();
// The following code also checks if the blockhash for a transaction is too old
// The check accounts for
// 1. Transaction forwarding delay
// 2. The slot at which the next leader will actually process the transaction
// Drop the transaction if it will expire by the time the next node receives and processes it
let api = perf_libs::api();
let max_tx_fwd_delay = if api.is_none() {
MAX_TRANSACTION_FORWARDING_DELAY
} else {
MAX_TRANSACTION_FORWARDING_DELAY_GPU
};
let results = bank.check_transactions(
transactions,
&filter,
(MAX_PROCESSING_AGE)
.saturating_sub(max_tx_fwd_delay)
.saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize),
&mut error_counters,
);
Self::filter_valid_transaction_indexes(&results, transaction_to_packet_indexes)
}
fn process_packets_transactions(
bank: &Arc<Bank>,
bank_creation_time: &Instant,
poh: &TransactionRecorder,
msgs: &Packets,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
banking_stage_stats: &BankingStageStats,
) -> (usize, usize, Vec<usize>) {
let mut packet_conversion_time = Measure::start("packet_conversion");
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
&packet_indexes,
bank.libsecp256k1_0_5_upgrade_enabled(),
bank.vote_only_bank(),
);
packet_conversion_time.stop();
debug!(
"bank: {} filtered transactions {}",
bank.slot(),
transactions.len()
);
let tx_len = transactions.len();
let mut process_tx_time = Measure::start("process_tx_time");
let (processed, unprocessed_tx_indexes) = Self::process_transactions(
bank,
bank_creation_time,
&transactions,
poh,
transaction_status_sender,
gossip_vote_sender,
);
process_tx_time.stop();
let unprocessed_tx_count = unprocessed_tx_indexes.len();
let mut filter_pending_packets_time = Measure::start("filter_pending_packets_time");
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
bank,
&transactions,
&transaction_to_packet_indexes,
&unprocessed_tx_indexes,
);
filter_pending_packets_time.stop();
inc_new_counter_info!(
"banking_stage-dropped_tx_before_forwarding",
unprocessed_tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
);
banking_stage_stats
.packet_conversion_elapsed
.fetch_add(packet_conversion_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.transaction_processing_elapsed
.fetch_add(process_tx_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.filter_pending_packets_elapsed
.fetch_add(filter_pending_packets_time.as_us(), Ordering::Relaxed);
(processed, tx_len, filtered_unprocessed_packet_indexes)
}
fn filter_unprocessed_packets(
bank: &Arc<Bank>,
msgs: &Packets,
transaction_indexes: &[usize],
my_pubkey: &Pubkey,
next_leader: Option<Pubkey>,
) -> Vec<usize> {
// Check if we are the next leader. If so, let's not filter the packets
// as we'll filter it again while processing the packets.
// Filtering helps if we were going to forward the packets to some other node
if let Some(leader) = next_leader {
if leader == *my_pubkey {
return transaction_indexes.to_vec();
}
}
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
&transaction_indexes,
bank.libsecp256k1_0_5_upgrade_enabled(),
bank.vote_only_bank(),
);
let tx_count = transaction_to_packet_indexes.len();
let unprocessed_tx_indexes = (0..transactions.len()).collect_vec();
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
bank,
&transactions,
&transaction_to_packet_indexes,
&unprocessed_tx_indexes,
);
inc_new_counter_info!(
"banking_stage-dropped_tx_before_forwarding",
tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
);
filtered_unprocessed_packet_indexes
}
fn generate_packet_indexes(vers: &PinnedVec<Packet>) -> Vec<usize> {
vers.iter()
.enumerate()
.filter_map(
|(index, ver)| {
if !ver.meta.discard {
Some(index)
} else {
None
}
},
)
.collect()
}
#[allow(clippy::too_many_arguments)]
/// Process the incoming packets
pub fn process_packets(
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh: &Arc<Mutex<PohRecorder>>,
recv_start: &mut Instant,
recv_timeout: Duration,
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
buffered_packets: &mut UnprocessedPackets,
banking_stage_stats: &BankingStageStats,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
recorder: &TransactionRecorder,
) -> Result<(), RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
recv_time.stop();
let mms_len = mms.len();
let count: usize = mms.iter().map(|x| x.packets.len()).sum();
debug!(
"@{:?} process start stalled for: {:?}ms txs: {} id: {}",
timestamp(),
duration_as_ms(&recv_start.elapsed()),
count,
id,
);
inc_new_counter_debug!("banking_stage-transactions_received", count);
let mut proc_start = Measure::start("process_packets_transactions_process");
let mut new_tx_count = 0;
let mut mms_iter = mms.into_iter();
let mut dropped_packets_count = 0;
let mut dropped_packet_batches_count = 0;
let mut newly_buffered_packets_count = 0;
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
let bank_start = poh.lock().unwrap().bank_start();
if PohRecorder::get_bank_still_processing_txs(&bank_start).is_none() {
Self::push_unprocessed(
buffered_packets,
msgs,
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
continue;
}
let (bank, bank_creation_time) = bank_start.unwrap();
let (processed, verified_txs_len, unprocessed_indexes) =
Self::process_packets_transactions(
&bank,
&bank_creation_time,
recorder,
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
);
new_tx_count += processed;
// Collect any unprocessed transactions in this batch for forwarding
Self::push_unprocessed(
buffered_packets,
msgs,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
// If there were retryable transactions, add the unexpired ones to the buffered queue
if processed < verified_txs_len {
let mut handle_retryable_packets_time = Measure::start("handle_retryable_packets");
let next_leader = poh.lock().unwrap().next_slot_leader();
                // Walk through the rest of the transactions and filter out the invalid (e.g. too old) ones
#[allow(clippy::while_let_on_iterator)]
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
let unprocessed_indexes = Self::filter_unprocessed_packets(
&bank,
&msgs,
&packet_indexes,
my_pubkey,
next_leader,
);
Self::push_unprocessed(
buffered_packets,
msgs,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
}
handle_retryable_packets_time.stop();
banking_stage_stats
.handle_retryable_packets_elapsed
.fetch_add(handle_retryable_packets_time.as_us(), Ordering::Relaxed);
}
}
proc_start.stop();
debug!(
"@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}",
timestamp(),
mms_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s()),
count,
id,
);
banking_stage_stats
.process_packets_elapsed
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.process_packets_count
.fetch_add(count, Ordering::Relaxed);
banking_stage_stats
.new_tx_count
.fetch_add(new_tx_count, Ordering::Relaxed);
banking_stage_stats
.dropped_packet_batches_count
.fetch_add(dropped_packet_batches_count, Ordering::Relaxed);
banking_stage_stats
.dropped_packets_count
.fetch_add(dropped_packets_count, Ordering::Relaxed);
banking_stage_stats
.newly_buffered_packets_count
.fetch_add(newly_buffered_packets_count, Ordering::Relaxed);
banking_stage_stats
.current_buffered_packet_batches_count
.swap(buffered_packets.len(), Ordering::Relaxed);
banking_stage_stats.current_buffered_packets_count.swap(
buffered_packets.iter().map(|packets| packets.1.len()).sum(),
Ordering::Relaxed,
);
*recv_start = Instant::now();
Ok(())
}
fn push_unprocessed(
unprocessed_packets: &mut UnprocessedPackets,
packets: Packets,
mut packet_indexes: Vec<usize>,
dropped_packet_batches_count: &mut usize,
dropped_packets_count: &mut usize,
newly_buffered_packets_count: &mut usize,
batch_limit: usize,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
banking_stage_stats: &BankingStageStats,
) {
{
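            // Filter out packets seen recently: `hasher.hash_packet` keys a shared LRU
            // cache, so a duplicate arriving in a later batch (possibly on another
            // thread) is dropped before it can be buffered twice.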
let original_packets_count = packet_indexes.len();
let mut packet_duplicate_check_time = Measure::start("packet_duplicate_check");
let mut duplicates = duplicates.lock().unwrap();
let (cache, hasher) = duplicates.deref_mut();
packet_indexes.retain(|i| {
let packet_hash = hasher.hash_packet(&packets.packets[*i]);
match cache.get_mut(&packet_hash) {
Some(_hash) => false,
None => {
cache.put(packet_hash, ());
true
}
}
});
packet_duplicate_check_time.stop();
banking_stage_stats
.packet_duplicate_check_elapsed
.fetch_add(packet_duplicate_check_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.dropped_duplicated_packets_count
.fetch_add(
original_packets_count.saturating_sub(packet_indexes.len()),
Ordering::Relaxed,
);
}
if Self::packet_has_more_unprocessed_transactions(&packet_indexes) {
if unprocessed_packets.len() >= batch_limit {
*dropped_packet_batches_count += 1;
if let Some(dropped_batch) = unprocessed_packets.pop_front() {
*dropped_packets_count += dropped_batch.1.len();
}
}
*newly_buffered_packets_count += packet_indexes.len();
unprocessed_packets.push_back((packets, packet_indexes, false));
}
}
fn packet_has_more_unprocessed_transactions(packet_indexes: &[usize]) -> bool {
!packet_indexes.is_empty()
}
pub fn join(self) -> thread::Result<()> {
for bank_thread_hdl in self.bank_thread_hdls {
bank_thread_hdl.join()?;
}
Ok(())
}
}
pub(crate) fn next_leader_tpu(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu)
}
fn next_leader_tpu_forwards(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_forwards)
}
pub(crate) fn next_leader_tpu_vote(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_vote)
}
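// Generic helper behind the `next_leader_tpu*` wrappers above: it looks up the upcoming
// leader's contact info and lets `port_selector` (e.g. `|leader| leader.tpu`) choose
// which socket address to return.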
fn next_leader_x<F>(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
port_selector: F,
) -> Option<std::net::SocketAddr>
where
F: FnOnce(&ContactInfo) -> SocketAddr,
{
if let Some(leader_pubkey) = poh_recorder
.lock()
.unwrap()
.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET)
{
cluster_info.lookup_contact_info(&leader_pubkey, port_selector)
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam_channel::unbounded;
use itertools::Itertools;
use safecoin_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_ledger::{
blockstore::{entries_to_test_shreds, Blockstore},
entry::{next_entry, Entry, EntrySlice},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_perf::packet::to_packets_chunked;
use solana_poh::{
poh_recorder::{create_test_recorder, Record, WorkingBank, WorkingBankEntry},
poh_service::PohService,
};
use solana_rpc::transaction_status_service::TransactionStatusService;
use safecoin_sdk::{
hash::Hash,
instruction::InstructionError,
poh_config::PohConfig,
signature::{Keypair, Signer},
system_instruction::SystemError,
system_transaction,
transaction::TransactionError,
};
use solana_streamer::socket::SocketAddrSpace;
use safecoin_transaction_status::TransactionWithStatusMeta;
use solana_vote_program::vote_transaction;
use std::{
net::SocketAddr,
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::Receiver,
},
thread::sleep,
};
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_banking_stage_shutdown1() {
let genesis_config = create_genesis_config(2).genesis_config;
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let (verified_sender, verified_receiver) = unbounded();
let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (vote_forward_sender, _vote_forward_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
            let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
gossip_verified_vote_receiver,
None,
vote_forward_sender,
);
drop(verified_sender);
drop(gossip_verified_vote_sender);
drop(tpu_vote_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_banking_stage_tick() {
solana_logger::setup();
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(2);
genesis_config.ticks_per_slot = 4;
let num_extra_ticks = 2;
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (vote_forward_sender, _vote_forward_receiver) = unbounded();
let (verified_gossip_vote_sender, verified_gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
verified_gossip_vote_receiver,
None,
vote_forward_sender,
);
trace!("sending bank");
drop(verified_sender);
drop(verified_gossip_vote_sender);
drop(tpu_vote_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
with_vers.iter_mut().for_each(|(b, v)| {
b.packets
.iter_mut()
.zip(v)
.for_each(|(p, f)| p.meta.discard = *f == 0)
});
with_vers.into_iter().map(|(b, _)| b).collect()
}
#[test]
fn test_banking_stage_entries_only() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
                // limit the tick count to avoid clearing the working_bank in PohRecorder,
                // which would surface as PohRecorderError(MaxHeightReached) in BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
gossip_verified_vote_receiver,
None,
gossip_vote_sender,
);
// fund another account so we can send 2 good transactions in a single batch.
let keypair = Keypair::new();
let fund_tx =
system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash);
bank.process_transaction(&fund_tx).unwrap();
// good tx
let to = safecoin_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);
// good tx, but no verify
let to2 = safecoin_sdk::pubkey::new_rand();
let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let to3 = safecoin_sdk::pubkey::new_rand();
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over
let packets = to_packets_chunked(&[tx_no_ver, tx_anf, tx], 3);
// glad they all fit
assert_eq!(packets.len(), 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![0u8, 1u8, 1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender // no_ver, anf, tx
.send(packets)
.unwrap();
drop(verified_sender);
drop(tpu_vote_sender);
drop(gossip_verified_vote_sender);
            // wait for banking_stage to finish processing all packets
banking_stage.join().unwrap();
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
let mut blockhash = start_hash;
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
bank.process_transaction(&fund_tx).unwrap();
//receive entries + ticks
loop {
let entries: Vec<Entry> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
assert!(entries.verify(&blockhash));
if !entries.is_empty() {
blockhash = entries.last().unwrap().hash;
for entry in entries {
bank.process_transactions(&entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
}
if bank.get_balance(&to) == 1 {
break;
}
sleep(Duration::from_millis(200));
}
assert_eq!(bank.get_balance(&to), 1);
assert_eq!(bank.get_balance(&to2), 0);
drop(entry_receiver);
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_banking_stage_entryfication() {
solana_logger::setup();
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(2);
let (verified_sender, verified_receiver) = unbounded();
// Process a batch that includes a transaction that receives two lamports.
let alice = Keypair::new();
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
// Process a second batch that uses the same from account, so conflicts with above TX
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
                    // limit the tick count to avoid clearing the working_bank in PohRecorder,
                    // which would surface as PohRecorderError(MaxHeightReached) in BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
vote_receiver,
3,
None,
gossip_vote_sender,
);
// wait for banking_stage to eat the packets
while bank.get_balance(&alice.pubkey()) < 2 {
sleep(Duration::from_millis(100));
}
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
entry_receiver
};
drop(verified_sender);
drop(vote_sender);
drop(tpu_vote_sender);
// consume the entire entry_receiver, feed it into a new bank
// check that the balance is what we expect.
let entries: Vec<_> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
let bank = Bank::new_no_wallclock_throttle(&genesis_config);
for entry in &entries {
bank.process_transactions(&entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
// Assert the user holds two lamports, not three. If the stage only outputs one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 2);
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_bank_record_transactions() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
// TODO use record_receiver
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = safecoin_sdk::pubkey::new_rand();
let keypair2 = Keypair::new();
let pubkey2 = safecoin_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()),
];
let mut results = vec![(Ok(()), None), (Ok(()), None)];
let _ = BankingStage::record_transactions(
bank.slot(),
transactions.iter(),
&results,
&recorder,
);
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), transactions.len());
// InstructionErrors should still be recorded
results[0] = (
Err(TransactionError::InstructionError(
1,
SystemError::ResultWithNegativeLamports.into(),
)),
None,
);
let (res, retryable) = BankingStage::record_transactions(
bank.slot(),
transactions.iter(),
&results,
&recorder,
);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), transactions.len());
// Other TransactionErrors should not be recorded
results[0] = (Err(TransactionError::AccountNotFound), None);
let (res, retryable) = BankingStage::record_transactions(
bank.slot(),
transactions.iter(),
&results,
&recorder,
);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), transactions.len() - 1);
// Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
// record_transactions should throw MaxHeightReached and return the set of retryable
// txs
let (res, retryable) = BankingStage::record_transactions(
bank.slot() + 1,
transactions.iter(),
&results,
&recorder,
);
assert_matches!(res, Err(PohRecorderError::MaxHeightReached));
// The first result was an error so it's filtered out. The second result was Ok(),
// so it should be marked as retryable
assert_eq!(retryable, vec![1]);
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_bank_prepare_filter_for_pending_transaction() {
assert_eq!(
BankingStage::prepare_filter_for_pending_transactions(6, &[2, 4, 5]),
vec![
Err(TransactionError::BlockhashNotFound),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Ok(())
]
);
assert_eq!(
BankingStage::prepare_filter_for_pending_transactions(6, &[0, 2, 3]),
vec![
Ok(()),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Ok(()),
Err(TransactionError::BlockhashNotFound),
Err(TransactionError::BlockhashNotFound),
]
);
}
#[test]
fn test_bank_filter_valid_transaction_indexes() {
assert_eq!(
BankingStage::filter_valid_transaction_indexes(
&[
(Err(TransactionError::BlockhashNotFound), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Ok(()), None),
],
&[2, 4, 5, 9, 11, 13]
),
[5, 11, 13]
);
assert_eq!(
BankingStage::filter_valid_transaction_indexes(
&[
(Ok(()), None),
(Err(TransactionError::BlockhashNotFound), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Ok(()), None),
(Ok(()), None),
],
&[1, 6, 7, 9, 31, 43]
),
[1, 9, 31, 43]
);
}
#[test]
fn test_should_process_or_forward_packets() {
let my_pubkey = safecoin_sdk::pubkey::new_rand();
let my_pubkey1 = safecoin_sdk::pubkey::new_rand();
let bank = Arc::new(Bank::default());
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, Some(&bank), false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey1, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
false,
false
),
BufferedPacketsDecision::Forward
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
true,
true
),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
true,
false
),
BufferedPacketsDecision::ForwardAndHold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume(_)
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
None,
false,
false
),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume(_)
);
}
fn create_slow_genesis_config(lamports: u64) -> GenesisConfigInfo {
let mut config_info = create_genesis_config(lamports);
// For these tests there's only 1 slot, don't want to run out of ticks
config_info.genesis_config.ticks_per_slot *= 8;
config_info
}
#[test]
fn test_bank_process_and_record_transactions() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let pubkey = safecoin_sdk::pubkey::new_rand();
let transactions =
vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash())
.into(),
];
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
)
.0
.unwrap();
poh_recorder.lock().unwrap().tick();
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
}
if done {
break;
}
}
trace!("done ticking");
assert!(done);
let transactions = vec![system_transaction::transfer(
&mint_keypair,
&pubkey,
2,
genesis_config.hash(),
)
.into()];
assert_matches!(
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
)
.0,
Err(PohRecorderError::MaxHeightReached)
);
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert_eq!(bank.get_balance(&pubkey), 1);
}
Blockstore::destroy(&ledger_path).unwrap();
}
fn simulate_poh(
record_receiver: CrossbeamReceiver<Record>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> JoinHandle<()> {
let poh_recorder = poh_recorder.clone();
let is_exited = poh_recorder.lock().unwrap().is_exited.clone();
let tick_producer = Builder::new()
.name("solana-simulate_poh".to_string())
.spawn(move || loop {
PohService::read_record_receiver_and_process(
&poh_recorder,
&record_receiver,
Duration::from_millis(10),
);
if is_exited.load(Ordering::Relaxed) {
break;
}
});
tick_producer.unwrap()
}
#[test]
fn test_bank_process_and_record_transactions_account_in_use() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let pubkey = safecoin_sdk::pubkey::new_rand();
let pubkey1 = safecoin_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()).into(),
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()).into(),
];
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (result, unprocessed) = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
);
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1);
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_filter_valid_packets() {
solana_logger::setup();
let mut all_packets = (0..16)
.map(|packets_id| {
let packets = Packets::new(
(0..32)
.map(|packet_id| {
let mut p = Packet::default();
p.meta.port = packets_id << 8 | packet_id;
p
})
.collect_vec(),
);
let valid_indexes = (0..32)
.filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None })
.collect_vec();
(packets, valid_indexes, false)
})
.collect_vec();
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
assert_eq!(result.len(), 256);
let _ = result
.into_iter()
.enumerate()
.map(|(index, p)| {
let packets_id = index / 16;
let packet_id = (index % 16) * 2 + 1;
assert_eq!(p.meta.port, (packets_id << 8 | packet_id) as u16);
})
.collect_vec();
all_packets[0].2 = true;
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
assert_eq!(result.len(), 240);
}
#[test]
fn test_process_transactions_returns_unprocessed_txs() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let pubkey = safecoin_sdk::pubkey::new_rand();
let transactions =
vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash())
.into(),
];
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&safecoin_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
// Poh Recorder has no working bank, so should throw MaxHeightReached error on
// record
let recorder = poh_recorder.recorder();
let poh_simulator = simulate_poh(record_receiver, &Arc::new(Mutex::new(poh_recorder)));
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (processed_transactions_count, mut retryable_txs) =
BankingStage::process_transactions(
&bank,
&Instant::now(),
&transactions,
&recorder,
None,
&gossip_vote_sender,
);
assert_eq!(processed_transactions_count, 0,);
retryable_txs.sort_unstable();
let expected: Vec<usize> = (0..transactions.len()).collect();
assert_eq!(retryable_txs, expected);
recorder.is_exited.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_write_persist_transaction_status() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let pubkey = safecoin_sdk::pubkey::new_rand();
let pubkey1 = safecoin_sdk::pubkey::new_rand();
let keypair1 = Keypair::new();
let success_tx =
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash());
let success_signature = success_tx.signatures[0];
let entry_1 = next_entry(&genesis_config.hash(), 1, vec![success_tx.clone()]);
let ix_error_tx =
system_transaction::transfer(&keypair1, &pubkey1, 10, genesis_config.hash());
let ix_error_signature = ix_error_tx.signatures[0];
let entry_2 = next_entry(&entry_1.hash, 1, vec![ix_error_tx.clone()]);
let fail_tx =
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash());
let entry_3 = next_entry(&entry_2.hash, 1, vec![fail_tx.clone()]);
let entries = vec![entry_1, entry_2, entry_3];
let transactions = vec![success_tx.into(), ix_error_tx.into(), fail_tx.into()];
bank.transfer(4, &mint_keypair, &keypair1.pubkey()).unwrap();
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(std::iter::once(&bank.slot())).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
Arc::new(AtomicU64::default()),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let _ = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
Some(TransactionStatusSender {
sender: transaction_status_sender,
enable_cpi_and_log_storage: false,
}),
&gossip_vote_sender,
);
transaction_status_service.join().unwrap();
let confirmed_block = blockstore.get_rooted_block(bank.slot(), false).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if transaction.signatures[0] == success_signature {
let meta = meta.unwrap();
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature {
let meta = meta.unwrap();
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
}
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[allow(clippy::type_complexity)]
fn setup_conflicting_transactions(
ledger_path: &Path,
) -> (
Vec<Transaction>,
Arc<Bank>,
Arc<Mutex<PohRecorder>>,
Receiver<WorkingBankEntry>,
JoinHandle<()>,
) {
Blockstore::destroy(ledger_path).unwrap();
let genesis_config_info = create_slow_genesis_config(10_000);
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = &genesis_config_info;
let blockstore =
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config));
let exit = Arc::new(AtomicBool::default());
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&safecoin_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
exit,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
// Set up unparallelizable conflicting transactions
let pubkey0 = safecoin_sdk::pubkey::new_rand();
let pubkey1 = safecoin_sdk::pubkey::new_rand();
let pubkey2 = safecoin_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
];
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
(
transactions,
bank,
poh_recorder,
entry_receiver,
poh_simulator,
)
}
#[test]
fn test_consume_buffered_packets() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
setup_conflicting_transactions(&ledger_path);
let recorder = poh_recorder.lock().unwrap().recorder();
let num_conflicting_transactions = transactions.len();
let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions);
assert_eq!(packets_vec.len(), 1);
assert_eq!(packets_vec[0].packets.len(), num_conflicting_transactions);
let all_packets = packets_vec.pop().unwrap();
let mut buffered_packets: UnprocessedPackets = vec![(
all_packets,
(0..num_conflicting_transactions).into_iter().collect(),
false,
)]
.into_iter()
.collect();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
// When the working bank in poh_recorder is None, no packets should be processed
assert!(!poh_recorder.lock().unwrap().has_bank());
let max_tx_processing_ns = std::u128::MAX;
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
&BankingStageStats::default(),
&recorder,
);
assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions);
            // When the poh recorder has a bank, it should process all non-conflicting
            // buffered packets; one packet is processed per iteration of the loop
for num_expected_unprocessed in (0..num_conflicting_transactions).rev() {
poh_recorder.lock().unwrap().set_bank(&bank);
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
&BankingStageStats::default(),
&recorder,
);
if num_expected_unprocessed == 0 {
assert!(buffered_packets.is_empty())
} else {
assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed);
}
}
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_consume_buffered_packets_interrupted() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
setup_conflicting_transactions(&ledger_path);
let num_conflicting_transactions = transactions.len();
let packets_vec = to_packets_chunked(&transactions, 1);
assert_eq!(packets_vec.len(), num_conflicting_transactions);
for single_packets in &packets_vec {
assert_eq!(single_packets.packets.len(), 1);
}
let mut buffered_packets: UnprocessedPackets = packets_vec
.clone()
.into_iter()
.map(|single_packets| (single_packets, vec![0], false))
.collect();
let (continue_sender, continue_receiver) = unbounded();
let (finished_packet_sender, finished_packet_receiver) = unbounded();
let test_fn = Some(move || {
finished_packet_sender.send(()).unwrap();
continue_receiver.recv().unwrap();
});
            // When the poh recorder has a bank, it should process all non-conflicting buffered
            // packets. Because each conflicting transaction is in its own `Packet` within
            // `packets_vec`, each iteration of this loop processes one element of `packets_vec`.
let interrupted_iteration = 1;
poh_recorder.lock().unwrap().set_bank(&bank);
let poh_recorder_ = poh_recorder.clone();
let recorder = poh_recorder_.lock().unwrap().recorder();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
// Start up thread to process the banks
let t_consume = Builder::new()
.name("consume-buffered-packets".to_string())
.spawn(move || {
BankingStage::consume_buffered_packets(
&Pubkey::default(),
std::u128::MAX,
&poh_recorder_,
&mut buffered_packets,
None,
&gossip_vote_sender,
test_fn,
&BankingStageStats::default(),
&recorder,
);
// Check everything is correct. All indexes after `interrupted_iteration`
// should still be unprocessed
assert_eq!(
buffered_packets.len(),
packets_vec[interrupted_iteration + 1..].len()
);
for ((remaining_unprocessed_packet, _, _forwarded), original_packet) in
buffered_packets
.iter()
.zip(&packets_vec[interrupted_iteration + 1..])
{
assert_eq!(
remaining_unprocessed_packet.packets[0],
original_packet.packets[0]
);
}
})
.unwrap();
for i in 0..=interrupted_iteration {
finished_packet_receiver.recv().unwrap();
if i == interrupted_iteration {
poh_recorder
.lock()
.unwrap()
.schedule_dummy_max_height_reached_failure();
}
continue_sender.send(()).unwrap();
}
t_consume.join().unwrap();
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_forwarder_budget() {
solana_logger::setup();
// Create `Packets` with 1 unprocessed element
let single_element_packets = Packets::new(vec![Packet::default()]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(single_element_packets, vec![0], false)]
.into_iter()
.collect();
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let genesis_config_info = create_slow_genesis_config(10_000);
let GenesisConfigInfo { genesis_config, .. } = &genesis_config_info;
let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config));
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
                // limit tick count to avoid clearing working_bank at
                // PohRecord, which would cause PohRecorderError(MaxHeightReached) at BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let data_budget = DataBudget::default();
BankingStage::handle_forwarding(
&ForwardOption::ForwardTransaction,
&cluster_info,
&mut unprocessed_packets,
&poh_recorder,
&socket,
false,
&data_budget,
);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_push_unprocessed_batch_limit() {
solana_logger::setup();
// Create `Packets` with 2 unprocessed elements
let new_packets = Packets::new(vec![Packet::default(); 2]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(new_packets, vec![0, 1], false)].into_iter().collect();
// Set the limit to 2
let batch_limit = 2;
// Create some new unprocessed packets
let new_packets = Packets::new(vec![Packet::default()]);
let packet_indexes = vec![];
let duplicates = Arc::new(Mutex::new((
LruCache::new(DEFAULT_LRU_SIZE),
PacketHasher::default(),
)));
let mut dropped_packet_batches_count = 0;
let mut dropped_packets_count = 0;
let mut newly_buffered_packets_count = 0;
let banking_stage_stats = BankingStageStats::default();
// Because the set of unprocessed `packet_indexes` is empty, the
// packets are not added to the unprocessed queue
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 1);
assert_eq!(dropped_packet_batches_count, 0);
assert_eq!(dropped_packets_count, 0);
assert_eq!(newly_buffered_packets_count, 0);
// Because the set of unprocessed `packet_indexes` is non-empty, the
// packets are added to the unprocessed queue
let packet_indexes = vec![0];
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets,
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(dropped_packet_batches_count, 0);
assert_eq!(dropped_packets_count, 0);
assert_eq!(newly_buffered_packets_count, 1);
// Because we've reached the batch limit, old unprocessed packets are
// dropped and the new one is appended to the end
let new_packets = Packets::new(vec![Packet::from_data(
Some(&SocketAddr::from(([127, 0, 0, 1], 10015))),
42,
)
.unwrap()]);
assert_eq!(unprocessed_packets.len(), batch_limit);
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
// Check duplicates are dropped (newly buffered shouldn't change)
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
3,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
}
#[test]
fn test_packet_message() {
let keypair = Keypair::new();
let pubkey = safecoin_sdk::pubkey::new_rand();
let blockhash = Hash::new_unique();
let transaction = system_transaction::transfer(&keypair, &pubkey, 1, blockhash);
let packet = Packet::from_data(None, &transaction).unwrap();
assert_eq!(
BankingStage::packet_message(&packet).unwrap().to_vec(),
transaction.message_data()
);
}
#[cfg(test)]
fn make_test_packets(
transactions: Vec<Transaction>,
vote_indexes: Vec<usize>,
) -> (Packets, Vec<usize>) {
let capacity = transactions.len();
let mut packets = Packets::with_capacity(capacity);
let mut packet_indexes = Vec::with_capacity(capacity);
packets.packets.resize(capacity, Packet::default());
for (index, tx) in transactions.iter().enumerate() {
Packet::populate_packet(&mut packets.packets[index], None, tx).ok();
packet_indexes.push(index);
}
for index in vote_indexes.iter() {
packets.packets[*index].meta.is_simple_vote_tx = true;
}
(packets, packet_indexes)
}
#[test]
fn test_transactions_from_packets() {
let keypair = Keypair::new();
let transfer_tx =
system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default());
let vote_tx = vote_transaction::new_vote_transaction(
vec![42],
Hash::default(),
Hash::default(),
&keypair,
&keypair,
&keypair,
None,
);
// packets with no votes
{
let vote_indexes = vec![];
let (packets, packet_indexes) =
make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(2, txs.len());
assert_eq!(vec![0, 1], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(0, txs.len());
assert_eq!(0, tx_packet_index.len());
}
// packets with some votes
{
let vote_indexes = vec![0, 2];
let (packets, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), transfer_tx, vote_tx.clone()],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(2, txs.len());
assert_eq!(vec![0, 2], tx_packet_index);
}
// packets with all votes
{
let vote_indexes = vec![0, 1, 2];
let (packets, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), vote_tx.clone(), vote_tx],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
false,
votes_only,
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
}
}
}
| 38.355483 | 118 | 0.551282 |
673d066fcaeea1e68abfe3ee4ab1c227ae34791a | 33,784 | //! This module contains the implementation of a virtual element node `VTag`.
use super::{
Attributes, Classes, Listener, Listeners, Patch, Reform, Transformer, VDiff, VList, VNode,
};
use crate::html::NodeRef;
use crate::utils::document;
use cfg_if::cfg_if;
use cfg_match::cfg_match;
use log::warn;
use std::borrow::Cow;
use std::cmp::PartialEq;
use std::fmt;
use std::rc::Rc;
cfg_if! {
if #[cfg(feature = "std_web")] {
use crate::html::EventListener;
#[allow(unused_imports)]
use stdweb::{_js_impl, js};
use stdweb::unstable::TryFrom;
use stdweb::web::html_element::{InputElement, TextAreaElement};
use stdweb::web::{Element, IElement, INode, Node};
} else if #[cfg(feature = "web_sys")] {
use gloo::events::EventListener;
use std::ops::Deref;
use wasm_bindgen::JsCast;
use web_sys::{
Element, HtmlInputElement as InputElement, HtmlTextAreaElement as TextAreaElement, Node,
};
}
}
/// SVG namespace string used for creating svg elements
pub const SVG_NAMESPACE: &str = "http://www.w3.org/2000/svg";
/// Default namespace for html elements
pub const HTML_NAMESPACE: &str = "http://www.w3.org/1999/xhtml";
/// A type for a virtual
/// [Element](https://developer.mozilla.org/en-US/docs/Web/API/Element)
/// representation.
pub struct VTag {
/// A tag of the element.
tag: Cow<'static, str>,
/// A reference to the `Element`.
pub reference: Option<Element>,
/// List of attached listeners.
pub listeners: Listeners,
/// List of attributes.
pub attributes: Attributes,
/// List of children nodes
pub children: VList,
/// List of attached classes.
pub classes: Classes,
/// Contains a value of an
/// [InputElement](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input).
pub value: Option<String>,
/// Contains
/// [kind](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#Form_%3Cinput%3E_types)
/// value of an `InputElement`.
pub kind: Option<String>,
/// Represents `checked` attribute of
/// [input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#attr-checked).
    /// It exists to override the standard behavior of the `checked` attribute, because
    /// in plain HTML it sets the `defaultChecked` value of an `InputElement`, but for reactive
    /// frameworks it's more useful to control the `checked` value of an `InputElement`.
pub checked: bool,
/// A node reference used for DOM access in Component lifecycle methods
pub node_ref: NodeRef,
/// Keeps handler for attached listeners to have an opportunity to drop them later.
captured: Vec<EventListener>,
pub key: String
}
impl Clone for VTag {
fn clone(&self) -> Self {
VTag {
tag: self.tag.clone(),
reference: None,
listeners: self.listeners.clone(),
attributes: self.attributes.clone(),
children: self.children.clone(),
classes: self.classes.clone(),
value: self.value.clone(),
kind: self.kind.clone(),
checked: self.checked,
node_ref: self.node_ref.clone(),
key: self.key.clone(),
captured: Vec::new(),
}
}
}
impl VTag {
/// Creates a new `VTag` instance with `tag` name (cannot be changed later in DOM).
pub fn new<S: Into<Cow<'static, str>>>(tag: S) -> Self {
VTag {
tag: tag.into(),
reference: None,
classes: Classes::new(),
attributes: Attributes::new(),
listeners: Vec::new(),
captured: Vec::new(),
children: VList::new_without_placeholder(),
node_ref: NodeRef::default(),
key: String::default(),
value: None,
kind: None,
// In HTML node `checked` attribute sets `defaultChecked` parameter,
// but we use own field to control real `checked` parameter
checked: false,
}
}
    /// Returns the tag of an `Element`. In HTML, tags are always uppercase.
pub fn tag(&self) -> &str {
&self.tag
}
/// Add `VNode` child.
pub fn add_child(&mut self, child: VNode) {
self.children.add_child(child);
}
/// Add multiple `VNode` children.
pub fn add_children(&mut self, children: Vec<VNode>) {
for child in children {
self.add_child(child);
}
}
    /// Adds a single class to this virtual node. It will actually be set later by an
    /// [Element.setAttribute](https://developer.mozilla.org/en-US/docs/Web/API/Element/setAttribute)
    /// call.
pub fn add_class(&mut self, class: &str) {
self.classes.push(class);
}
    /// Adds multiple classes to this virtual node. They will actually be set later by an
    /// [Element.setAttribute](https://developer.mozilla.org/en-US/docs/Web/API/Element/setAttribute)
    /// call.
pub fn add_classes(&mut self, classes: Vec<&str>) {
for class in classes {
self.classes.push(class);
}
}
    /// Sets the classes of this virtual node. They will actually be set later by an
    /// [Element.setAttribute](https://developer.mozilla.org/en-US/docs/Web/API/Element/setAttribute)
    /// call.
pub fn set_classes(&mut self, classes: impl Into<Classes>) {
self.classes = classes.into();
}
/// Sets `value` for an
/// [InputElement](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input).
pub fn set_value<T: ToString>(&mut self, value: &T) {
self.value = Some(value.to_string());
}
/// Sets `kind` property of an
/// [InputElement](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input).
    /// Same as setting the `type` attribute.
pub fn set_kind<T: ToString>(&mut self, value: &T) {
self.kind = Some(value.to_string());
}
/// Sets `checked` property of an
/// [InputElement](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input).
    /// (Not the value of the node's attribute.)
pub fn set_checked(&mut self, value: bool) {
self.checked = value;
}
    /// Adds an attribute to a virtual node. Not every attribute works when
    /// it is set as an attribute. We use workarounds for:
    /// `class`, `type/kind`, `value` and `checked`.
pub fn add_attribute<T: ToString>(&mut self, name: &str, value: &T) {
self.attributes.insert(name.to_owned(), value.to_string());
}
    /// Adds attributes to a virtual node. Not every attribute works when
    /// it is set as an attribute. We use workarounds for:
    /// `class`, `type/kind`, `value` and `checked`.
pub fn add_attributes(&mut self, attrs: Vec<(String, String)>) {
for (name, value) in attrs {
self.attributes.insert(name, value);
}
}
    /// Adds a new listener to the node.
    /// It's stored behind an `Rc` because we want to keep it in a single list.
    /// Later `Listener::attach` will attach an actual listener to a DOM node.
pub fn add_listener(&mut self, listener: Rc<dyn Listener>) {
self.listeners.push(listener);
}
    /// Adds new listeners to the node.
    /// They are stored behind `Rc`s because we want to keep them in a single list.
    /// Later `Listener::attach` will attach an actual listener to a DOM node.
pub fn add_listeners(&mut self, listeners: Vec<Rc<dyn Listener>>) {
for listener in listeners {
self.listeners.push(listener);
}
}
    /// If there is no ancestor, or self's classes (or their order) differ from
    /// the ancestor's:
    /// - Returns the classes of self separated by spaces.
    ///
    /// Otherwise `None` is returned.
fn diff_classes<'a>(&'a self, ancestor: &'a Option<Box<Self>>) -> Option<String> {
if ancestor
.as_ref()
.map(|ancestor| self.classes.ne(&ancestor.classes))
.unwrap_or(true)
{
Some(self.classes.to_string())
} else {
None
}
}
/// Similar to diff_classes except for attributes.
///
/// This also handles patching of attributes when the keys are equal but
/// the values are different.
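    ///
    /// For example, with ancestor attributes `{"a": "1", "b": "2"}` and self
    /// attributes `{"a": "1", "b": "3", "c": "4"}`, this yields
    /// `Patch::Replace("b", "3")` and `Patch::Add("c", "4")`; a key present
    /// only on the ancestor would instead yield `Patch::Remove`.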
fn diff_attributes<'a>(
&'a self,
ancestor: &'a Option<Box<Self>>,
) -> impl Iterator<Item = Patch<&'a str, &'a str>> + 'a {
// Only change what is necessary.
let to_add_or_replace =
self.attributes.iter().filter_map(move |(key, value)| {
match ancestor
.as_ref()
.and_then(|ancestor| ancestor.attributes.get(&**key))
{
None => Some(Patch::Add(&**key, &**value)),
Some(ancestor_value) if value != ancestor_value => {
Some(Patch::Replace(&**key, &**value))
}
_ => None,
}
});
let to_remove = ancestor
.iter()
.flat_map(|ancestor| ancestor.attributes.keys())
.filter(move |key| !self.attributes.contains_key(&**key))
.map(|key| Patch::Remove(&**key));
to_add_or_replace.chain(to_remove)
}
    /// Similar to `diff_attributes` except there is only a single `kind`.
fn diff_kind<'a>(&'a self, ancestor: &'a Option<Box<Self>>) -> Option<Patch<&'a str, ()>> {
match (
self.kind.as_ref(),
ancestor.as_ref().and_then(|anc| anc.kind.as_ref()),
) {
(Some(ref left), Some(ref right)) => {
if left != right {
Some(Patch::Replace(&**left, ()))
} else {
None
}
}
(Some(ref left), None) => Some(Patch::Add(&**left, ())),
(None, Some(right)) => Some(Patch::Remove(&**right)),
(None, None) => None,
}
}
/// Almost identical in spirit to `diff_kind`
fn diff_value<'a>(&'a self, ancestor: &'a Option<Box<Self>>) -> Option<Patch<&'a str, ()>> {
match (
self.value.as_ref(),
ancestor.as_ref().and_then(|anc| anc.value.as_ref()),
) {
(Some(ref left), Some(ref right)) => {
if left != right {
Some(Patch::Replace(&**left, ()))
} else {
None
}
}
(Some(ref left), None) => Some(Patch::Add(&**left, ())),
(None, Some(right)) => Some(Patch::Remove(&**right)),
(None, None) => None,
}
}
fn apply_diffs(&mut self, ancestor: &Option<Box<Self>>) {
let element = self.reference.as_ref().expect("element expected");
// Update parameters
let class_str = self.diff_classes(ancestor);
if let Some(class_str) = class_str {
element
.set_attribute("class", &class_str)
.expect("could not set class");
}
let changes = self.diff_attributes(ancestor);
for change in changes {
match change {
Patch::Add(key, value) | Patch::Replace(key, value) => {
element
.set_attribute(&key, &value)
.expect("invalid attribute key");
}
Patch::Remove(key) => {
cfg_match! {
feature = "std_web" => element.remove_attribute(&key),
feature = "web_sys" => element.remove_attribute(&key).expect("could not remove class"),
};
}
}
}
        // `input` elements have extra parameters to control.
        // We override the behavior of attributes to make them clearer
        // and more useful in templates. For example, we interpret the `checked`
        // attribute as the `checked` parameter, not `defaultChecked` as browsers do
if let Some(input) = {
cfg_match! {
feature = "std_web" => InputElement::try_from(element.clone()).ok(),
feature = "web_sys" => element.dyn_ref::<InputElement>(),
}
} {
if let Some(change) = self.diff_kind(ancestor) {
let kind = match change {
Patch::Add(kind, _) | Patch::Replace(kind, _) => kind,
Patch::Remove(_) => "",
};
cfg_match! {
feature = "std_web" => ({
//https://github.com/koute/stdweb/commit/3b85c941db00b8e3c942624afd50c5929085fb08
//input.set_kind(&kind);
let input = &input;
js! { @(no_return)
@{input}.type = @{kind};
}
}),
feature = "web_sys" => input.set_type(kind),
}
}
if let Some(change) = self.diff_value(ancestor) {
let raw_value = match change {
Patch::Add(kind, _) | Patch::Replace(kind, _) => kind,
Patch::Remove(_) => "",
};
cfg_match! {
feature = "std_web" => input.set_raw_value(raw_value),
feature = "web_sys" => input.set_value(raw_value),
};
}
// IMPORTANT! This parameter has to be set every time
// to prevent strange behaviour in the browser when the DOM changes
set_checked(&input, self.checked);
} else if let Some(tae) = {
cfg_match! {
feature = "std_web" => TextAreaElement::try_from(element.clone()).ok(),
feature = "web_sys" => element.dyn_ref::<TextAreaElement>(),
}
} {
if let Some(change) = self.diff_value(ancestor) {
let value = match change {
Patch::Add(kind, _) | Patch::Replace(kind, _) => kind,
Patch::Remove(_) => "",
};
tae.set_value(value);
}
}
}
}
impl VDiff for VTag {
/// Remove VTag from parent.
fn detach(&mut self, parent: &Element) -> Option<Node> {
let node = self
.reference
.take()
.expect("tried to remove not rendered VTag from DOM");
// recursively remove its children
self.children.detach(&node);
let next_sibling = node.next_sibling();
if parent.remove_child(&node).is_err() {
warn!("Node not found to remove VTag");
}
next_sibling
}
    /// Renders the virtual tag over a DOM `Element`, comparing it with an ancestor `VTag`
    /// to compute what to patch in the actual DOM nodes.
fn apply(
&mut self,
parent: &Element,
previous_sibling: Option<&Node>,
ancestor: Option<VNode>,
) -> Option<Node> {
assert!(
self.reference.is_none(),
"reference is ignored so must not be set"
);
let (reform, mut ancestor) = {
match ancestor {
Some(VNode::VTag(mut vtag)) => {
if self.tag == vtag.tag && self.key == vtag.key {
// If tags are equal, preserve the reference that already exists.
self.reference = vtag.reference.take();
(Reform::Keep, Some(vtag))
} else {
// We have to create a new reference, remove ancestor.
(Reform::Before(vtag.detach(parent)), None)
}
}
Some(mut vnode) => {
                    // It is not a VTag variant, so we must remove the ancestor.
(Reform::Before(vnode.detach(parent)), None)
}
None => (Reform::Before(None), None),
}
};
// Ensure that `self.reference` exists.
//
// This can use the previous reference or create a new one.
// If we create a new one we must insert it in the correct
// place, which we use `next_sibling` or `previous_sibling` for.
match reform {
Reform::Keep => {}
Reform::Before(next_sibling) => {
let element = if self.tag == "svg"
|| parent
.namespace_uri()
.map_or(false, |ns| ns == SVG_NAMESPACE)
{
let namespace = SVG_NAMESPACE;
#[cfg(feature = "web_sys")]
let namespace = Some(namespace);
document()
.create_element_ns(namespace, &self.tag)
.expect("can't create namespaced element for vtag")
} else {
document()
.create_element(&self.tag)
.expect("can't create element for vtag")
};
if let Some(next_sibling) = next_sibling {
let next_sibling = &next_sibling;
#[cfg(feature = "web_sys")]
let next_sibling = Some(next_sibling);
parent
.insert_before(&element, next_sibling)
.expect("can't insert tag before next sibling");
} else if let Some(next_sibling) = previous_sibling.and_then(|p| p.next_sibling()) {
let next_sibling = &next_sibling;
#[cfg(feature = "web_sys")]
let next_sibling = Some(next_sibling);
parent
.insert_before(&element, next_sibling)
.expect("can't insert tag before next sibling");
} else {
#[cfg_attr(
feature = "std_web",
allow(clippy::let_unit_value, unused_variables)
)]
{
let result = parent.append_child(&element);
#[cfg(feature = "web_sys")]
result.expect("can't append node to parent");
}
}
self.reference = Some(element);
}
}
self.apply_diffs(&ancestor);
        // Every render removes all listeners and attaches them again later
// TODO(#943): Compare references of handler to do listeners update better
if let Some(ancestor) = ancestor.as_mut() {
ancestor.captured.clear();
}
let element = self.reference.clone().expect("element expected");
for listener in self.listeners.drain(..) {
let handle = listener.attach(&element);
self.captured.push(handle);
}
// Process children
self.children
.apply(&element, None, ancestor.map(|a| a.children.into()));
let node = self.reference.as_ref().map(|e| {
let node = cfg_match! {
feature = "std_web" => e.as_node(),
feature = "web_sys" => e.deref(),
};
node.to_owned()
});
self.node_ref.set(node.clone());
node
}
}
impl fmt::Debug for VTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "VTag {{ tag: {} }}", self.tag)
}
}
/// Set `checked` value for the `InputElement`.
fn set_checked(input: &InputElement, value: bool) {
cfg_match! {
feature = "std_web" => js!( @(no_return) @{input}.checked = @{value}; ),
feature = "web_sys" => input.set_checked(value),
};
}
impl PartialEq for VTag {
fn eq(&self, other: &VTag) -> bool {
self.tag == other.tag
&& self.value == other.value
&& self.kind == other.kind
&& self.checked == other.checked
&& self.listeners.len() == other.listeners.len()
&& self
.listeners
.iter()
.map(|l| l.kind())
.eq(other.listeners.iter().map(|l| l.kind()))
&& self.attributes == other.attributes
&& self.classes.eq(&other.classes)
&& self.children == other.children
}
}
impl<T> Transformer<T, T> for VTag {
fn transform(from: T) -> T {
from
}
}
impl<'a, T> Transformer<&'a T, T> for VTag
where
T: Clone,
{
fn transform(from: &'a T) -> T {
from.clone()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{html, Component, ComponentLink, Html, ShouldRender};
#[cfg(feature = "std_web")]
use stdweb::web::{document, IElement};
#[cfg(feature = "wasm_test")]
use wasm_bindgen_test::{wasm_bindgen_test as test, wasm_bindgen_test_configure};
#[cfg(feature = "wasm_test")]
wasm_bindgen_test_configure!(run_in_browser);
struct Comp;
impl Component for Comp {
type Message = ();
type Properties = ();
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
Comp
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
unimplemented!();
}
fn view(&self) -> Html {
unimplemented!();
}
}
struct CompInt;
impl Component for CompInt {
type Message = u32;
type Properties = ();
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
CompInt
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
unimplemented!();
}
fn view(&self) -> Html {
unimplemented!();
}
}
struct CompBool;
impl Component for CompBool {
type Message = bool;
type Properties = ();
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
CompBool
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
unimplemented!();
}
fn view(&self) -> Html {
unimplemented!();
}
}
#[test]
fn it_compares_tags() {
let a = html! {
<div></div>
};
let b = html! {
<div></div>
};
let c = html! {
<p></p>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_text() {
let a = html! {
<div>{ "correct" }</div>
};
let b = html! {
<div>{ "correct" }</div>
};
let c = html! {
<div>{ "incorrect" }</div>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_attributes() {
let a = html! {
<div a="test"></div>
};
let b = html! {
<div a="test"></div>
};
let c = html! {
<div a="fail"></div>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_children() {
let a = html! {
<div>
<p></p>
</div>
};
let b = html! {
<div>
<p></p>
</div>
};
let c = html! {
<div>
<span></span>
</div>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_classes() {
let a = html! {
<div class="test"></div>
};
let b = html! {
<div class="test"></div>
};
let c = html! {
<div class="fail"></div>
};
let d = html! {
<div class=format!("fail")></div>
};
assert_eq!(a, b);
assert_ne!(a, c);
assert_eq!(c, d);
}
#[test]
fn classes_from_local_variables() {
let a = html! {
<div class=("class-1", "class-2")></div>
};
let class_2 = "class-2";
let b = html! {
<div class=("class-1", class_2)></div>
};
let class_2_fmt = format!("class-{}", 2);
let c = html! {
<div class=("class-1", class_2_fmt)></div>
};
assert_eq!(a, b);
assert_eq!(a, c);
}
#[test]
fn supports_multiple_classes_string() {
let a = html! {
<div class="class-1 class-2 class-3"></div>
};
let b = html! {
<div class="class-2 class-3 class-1"></div>
};
assert_ne!(a, b);
if let VNode::VTag(vtag) = a {
println!("{:?}", vtag.classes);
assert!(vtag.classes.contains("class-1"));
assert!(vtag.classes.contains("class-2"));
assert!(vtag.classes.contains("class-3"));
} else {
panic!("vtag expected");
}
}
#[test]
fn supports_multiple_classes_vec() {
let mut classes = vec!["class-1"];
classes.push("class-2");
let a = html! {
<div class=classes></div>
};
if let VNode::VTag(vtag) = a {
println!("{:?}", vtag.classes);
assert!(vtag.classes.contains("class-1"));
assert!(vtag.classes.contains("class-2"));
assert!(!vtag.classes.contains("class-3"));
} else {
panic!("vtag expected");
}
}
#[test]
fn filter_empty_string_classes_vec() {
let mut classes = vec![""];
classes.push("class-2");
let a = html! { <div class=vec![""]></div> };
let b = html! { <div class=("")></div> };
let c = html! { <div class=""></div> };
if let VNode::VTag(vtag) = a {
assert!(vtag.classes.is_empty());
} else {
panic!("vtag expected");
}
if let VNode::VTag(vtag) = b {
assert!(vtag.classes.is_empty());
} else {
panic!("vtag expected");
}
if let VNode::VTag(vtag) = c {
assert!(vtag.classes.is_empty());
} else {
panic!("vtag expected");
}
}
fn assert_vtag(node: &mut VNode) -> &mut VTag {
if let VNode::VTag(vtag) = node {
return vtag;
}
panic!("should be vtag");
}
fn assert_namespace(vtag: &VTag, namespace: &'static str) {
assert_eq!(
vtag.reference.as_ref().unwrap().namespace_uri().unwrap(),
namespace
);
}
#[test]
fn supports_svg() {
#[cfg(feature = "std_web")]
let document = document();
#[cfg(feature = "web_sys")]
let document = web_sys::window().unwrap().document().unwrap();
let div_el = document.create_element("div").unwrap();
let namespace = SVG_NAMESPACE;
#[cfg(feature = "web_sys")]
let namespace = Some(namespace);
let svg_el = document.create_element_ns(namespace, "svg").unwrap();
let mut g_node = html! { <g></g> };
let path_node = html! { <path></path> };
let mut svg_node = html! { <svg>{path_node}</svg> };
let svg_tag = assert_vtag(&mut svg_node);
svg_tag.apply(&div_el, None, None);
assert_namespace(svg_tag, SVG_NAMESPACE);
let path_tag = assert_vtag(svg_tag.children.get_mut(0).unwrap());
assert_namespace(path_tag, SVG_NAMESPACE);
let g_tag = assert_vtag(&mut g_node);
g_tag.apply(&div_el, None, None);
assert_namespace(g_tag, HTML_NAMESPACE);
g_tag.reference = None;
g_tag.apply(&svg_el, None, None);
assert_namespace(g_tag, SVG_NAMESPACE);
}
#[test]
fn keeps_order_of_classes() {
let a = html! {
<div class="class-1 class-2 class-3",></div>
};
if let VNode::VTag(vtag) = a {
println!("{:?}", vtag.classes);
assert_eq!(vtag.classes.to_string(), "class-1 class-2 class-3");
}
}
#[test]
fn it_compares_values() {
let a = html! {
<input value="test"/>
};
let b = html! {
<input value="test"/>
};
let c = html! {
<input value="fail"/>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_kinds() {
let a = html! {
<input type="text"/>
};
let b = html! {
<input type="text"/>
};
let c = html! {
<input type="hidden"/>
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_compares_checked() {
let a = html! {
<input type="checkbox" checked=false />
};
let b = html! {
<input type="checkbox" checked=false />
};
let c = html! {
<input type="checkbox" checked=true />
};
assert_eq!(a, b);
assert_ne!(a, c);
}
#[test]
fn it_allows_aria_attributes() {
let a = html! {
<p aria-controls="it-works">
<a class="btn btn-primary"
data-toggle="collapse"
href="#collapseExample"
role="button"
aria-expanded="false"
aria-controls="collapseExample">
{ "Link with href" }
</a>
<button class="btn btn-primary"
type="button"
data-toggle="collapse"
data-target="#collapseExample"
aria-expanded="false"
aria-controls="collapseExample">
{ "Button with data-target" }
</button>
<div own-attribute-with-multiple-parts="works" />
</p>
};
if let VNode::VTag(vtag) = a {
assert!(vtag.attributes.contains_key("aria-controls"));
assert_eq!(
vtag.attributes.get("aria-controls"),
Some(&"it-works".into())
);
} else {
panic!("vtag expected");
}
}
#[test]
fn it_checks_mixed_closing_tags() {
let a = html! { <div> <div/> </div> };
let b = html! { <div> <div></div> </div> };
assert_eq!(a, b);
}
#[test]
fn it_checks_misleading_gt() {
html! { <div data-val=<u32 as Default>::default()></div> };
html! { <div data-val=Box::<u32>::default()></div> };
html! { <div><a data-val=<u32 as Default>::default() /> </div> };
html! { <div><a data-val=Box::<u32>::default() /></div> };
}
#[test]
fn swap_order_of_classes() {
let parent = document().create_element("div").unwrap();
#[cfg(feature = "std_web")]
document().body().unwrap().append_child(&parent);
#[cfg(feature = "web_sys")]
document().body().unwrap().append_child(&parent).unwrap();
let mut elem = html! { <div class=("class-1", "class-2", "class-3")></div> };
elem.apply(&parent, None, None);
let vtag = if let VNode::VTag(vtag) = elem {
vtag
} else {
panic!("should be vtag")
};
let expected = "class-1 class-2 class-3";
assert_eq!(vtag.classes.to_string(), expected);
assert_eq!(
vtag.reference
.as_ref()
.unwrap()
.get_attribute("class")
.unwrap(),
expected
);
let ancestor = vtag;
let elem = html! { <div class=("class-3", "class-2", "class-1")></div> };
let mut vtag = if let VNode::VTag(vtag) = elem {
vtag
} else {
panic!("should be vtag")
};
vtag.apply(&parent, None, Some(VNode::VTag(ancestor)));
let expected = "class-3 class-2 class-1";
assert_eq!(vtag.classes.to_string(), expected);
assert_eq!(
vtag.reference
.as_ref()
.unwrap()
.get_attribute("class")
.unwrap(),
expected
);
}
#[test]
fn add_class_to_the_middle() {
let parent = document().create_element("div").unwrap();
#[cfg(feature = "std_web")]
document().body().unwrap().append_child(&parent);
#[cfg(feature = "web_sys")]
document().body().unwrap().append_child(&parent).unwrap();
let mut elem = html! { <div class=("class-1", "class-3")></div> };
elem.apply(&parent, None, None);
let vtag = if let VNode::VTag(vtag) = elem {
vtag
} else {
panic!("should be vtag")
};
let expected = "class-1 class-3";
assert_eq!(vtag.classes.to_string(), expected);
assert_eq!(
vtag.reference
.as_ref()
.unwrap()
.get_attribute("class")
.unwrap(),
expected
);
let ancestor = vtag;
let elem = html! { <div class=("class-1", "class-2", "class-3")></div> };
let mut vtag = if let VNode::VTag(vtag) = elem {
vtag
} else {
panic!("should be vtag")
};
vtag.apply(&parent, None, Some(VNode::VTag(ancestor)));
let expected = "class-1 class-2 class-3";
assert_eq!(vtag.classes.to_string(), expected);
assert_eq!(
vtag.reference
.as_ref()
.unwrap()
.get_attribute("class")
.unwrap(),
expected
);
}
}
| 31.252544 | 111 | 0.499378 |
03112ff354ec5feb93bfa1cd3d05c7ac17e5bfc0 | 7,550 | //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`.
use crate::core::ics04_channel::channel::{ChannelEnd, State};
use crate::core::ics04_channel::context::ChannelReader;
use crate::core::ics04_channel::error::Error;
use crate::core::ics04_channel::events::Attributes;
use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult};
use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit;
use crate::core::ics24_host::identifier::ChannelId;
use crate::events::IbcEvent;
use crate::handler::{HandlerOutput, HandlerResult};
use crate::prelude::*;
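// Validation flow, in order: authenticate the port capability, require exactly
// one connection hop, check that the hop's connection exists and supports the
// requested channel ordering, require a non-empty version, then generate a new
// channel identifier and emit an `OpenInitChannel` event.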
pub(crate) fn process(
ctx: &dyn ChannelReader,
msg: MsgChannelOpenInit,
) -> HandlerResult<ChannelResult, Error> {
let mut output = HandlerOutput::builder();
// Channel capabilities
let channel_cap = ctx.authenticated_capability(&msg.port_id().clone())?;
if msg.channel().connection_hops().len() != 1 {
return Err(Error::invalid_connection_hops_length(
1,
msg.channel().connection_hops().len(),
));
}
// An IBC connection running on the local (host) chain should exist.
let conn = ctx.connection_end(&msg.channel().connection_hops()[0])?;
let get_versions = conn.versions();
let version = match get_versions.as_slice() {
[version] => version,
_ => return Err(Error::invalid_version_length_connection()),
};
let channel_feature = msg.channel().ordering().to_string();
if !version.is_supported_feature(channel_feature) {
return Err(Error::channel_feature_not_suported_by_connection());
}
    // TODO: check that `version` is non-empty but not necessarily coherent
if msg.channel().version().is_empty() {
return Err(Error::empty_version());
}
// Channel identifier construction.
let id_counter = ctx.channel_counter()?;
let chan_id = ChannelId::new(id_counter);
output.log(format!(
"success: generated new channel identifier: {}",
chan_id
));
let new_channel_end = ChannelEnd::new(
State::Init,
*msg.channel().ordering(),
msg.channel().counterparty().clone(),
msg.channel().connection_hops().clone(),
msg.channel().version(),
);
output.log("success: no channel found");
let result = ChannelResult {
port_id: msg.port_id().clone(),
channel_id: chan_id.clone(),
channel_end: new_channel_end,
channel_id_state: ChannelIdState::Generated,
channel_cap,
};
let event_attributes = Attributes {
channel_id: Some(chan_id),
..Default::default()
};
output.emit(IbcEvent::OpenInitChannel(event_attributes.into()));
Ok(output.with_result(result))
}
#[cfg(test)]
mod tests {
use crate::prelude::*;
use test_log::test;
use crate::core::ics03_connection::connection::ConnectionEnd;
use crate::core::ics03_connection::connection::State as ConnectionState;
use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init;
use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit;
use crate::core::ics03_connection::version::get_compatible_versions;
use crate::core::ics04_channel::channel::State;
use crate::core::ics04_channel::handler::{channel_dispatch, ChannelResult};
use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init;
use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit;
use crate::core::ics04_channel::msgs::ChannelMsg;
use crate::core::ics24_host::identifier::ConnectionId;
use crate::events::IbcEvent;
use crate::mock::context::MockContext;
#[test]
fn chan_open_init_msg_processing() {
struct Test {
name: String,
ctx: MockContext,
msg: ChannelMsg,
want_pass: bool,
}
let msg_chan_init =
MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap();
let context = MockContext::default();
let msg_conn_init =
MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap();
let init_conn_end = ConnectionEnd::new(
ConnectionState::Init,
msg_conn_init.client_id().clone(),
msg_conn_init.counterparty().clone(),
get_compatible_versions(),
msg_conn_init.delay_period,
);
let cid = ConnectionId::default();
let tests: Vec<Test> = vec![
Test {
name: "Processing fails because no connection exists in the context".to_string(),
ctx: context.clone(),
msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()),
want_pass: false,
},
Test {
name: "Processing fails because port does not have a capability associated"
.to_string(),
ctx: context
.clone()
.with_connection(cid.clone(), init_conn_end.clone()),
msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()),
want_pass: false,
},
Test {
name: "Good parameters".to_string(),
ctx: context
.with_connection(cid, init_conn_end)
.with_port_capability(
MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init())
.unwrap()
.port_id()
.clone(),
),
msg: ChannelMsg::ChannelOpenInit(msg_chan_init),
want_pass: true,
},
]
.into_iter()
.collect();
for test in tests {
let res = channel_dispatch(&test.ctx, test.msg.clone());
// Additionally check the events and the output objects in the result.
match res {
Ok(proto_output) => {
assert!(
test.want_pass,
"chan_open_init: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}",
test.name,
test.msg.clone(),
test.ctx.clone()
);
assert!(!proto_output.events.is_empty()); // Some events must exist.
// The object in the output is a ChannelEnd, should have init state.
let res: ChannelResult = proto_output.result;
assert_eq!(res.channel_end.state().clone(), State::Init);
let msg_init = test.msg.clone();
if let ChannelMsg::ChannelOpenInit(msg_init) = msg_init {
assert_eq!(res.port_id.clone(), msg_init.port_id().clone());
}
for e in proto_output.events.iter() {
assert!(matches!(e, &IbcEvent::OpenInitChannel(_)));
}
}
Err(e) => {
assert!(
!test.want_pass,
"chan_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}",
test.name,
test.msg,
test.ctx.clone(),
e,
);
}
}
}
}
}
| 36.650485 | 112 | 0.574437 |
91650e37b79b03ad9e9b499859177a18476a9fa7 | 238 | //! These integer lock levels are checked in `c/vfs.c`
#[allow(dead_code)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(i32)]
pub enum LockLevel {
None,
Shared,
Reserved,
Pending,
Exclusive,
}
| 18.307692 | 61 | 0.647059 |
dd8e9986fb76537ae1bb1315915637ff1b8fdd17 | 6,879 | //! Justfile summary creation, for testing purposes only.
//!
//! The contents of this module are not bound by any stability guarantees.
//! Breaking changes may be introduced at any time.
//!
//! The main entry point into this module is the `summary` function, which
//! parses a justfile at a given path and produces a `Summary` object, which
//! broadly captures the functionality of the parsed justfile, or an error
//! message.
//!
//! This functionality is intended to be used with `janus`, a tool for ensuring
//! that changes to just do not inadvertently break or change the interpretation
//! of existing justfiles.
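//!
//! A minimal usage sketch (the `just::summary` path and the justfile location
//! are assumptions):
//!
//! ```ignore
//! use std::path::Path;
//!
//! // The outer `Err` is an I/O failure; the inner `Err` is a compilation error message.
//! match just::summary::summary(Path::new("justfile")) {
//!     Ok(Ok(summary)) => println!("{} recipes", summary.recipes.len()),
//!     Ok(Err(message)) => eprintln!("compile error: {}", message),
//!     Err(io_error) => eprintln!("io error: {}", io_error),
//! }
//! ```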
use std::{collections::BTreeMap, fs, io, path::Path};
use crate::compiler::Compiler;
mod full {
pub(crate) use crate::{
assignment::Assignment, dependency::Dependency, expression::Expression, fragment::Fragment,
justfile::Justfile, line::Line, parameter::Parameter, parameter_kind::ParameterKind,
recipe::Recipe, thunk::Thunk,
};
}
pub fn summary(path: &Path) -> Result<Result<Summary, String>, io::Error> {
let text = fs::read_to_string(path)?;
match Compiler::compile(&text) {
Ok(justfile) => Ok(Ok(Summary::new(justfile))),
Err(compilation_error) => Ok(Err(compilation_error.to_string())),
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Summary {
pub assignments: BTreeMap<String, Assignment>,
pub recipes: BTreeMap<String, Recipe>,
}
impl Summary {
fn new(justfile: full::Justfile) -> Summary {
let mut aliases = BTreeMap::new();
for alias in justfile.aliases.values() {
aliases
.entry(alias.target.name())
.or_insert_with(Vec::new)
.push(alias.name.to_string());
}
Summary {
recipes: justfile
.recipes
.into_iter()
.map(|(name, recipe)| {
(
name.to_string(),
Recipe::new(&recipe, aliases.remove(name).unwrap_or_default()),
)
})
.collect(),
assignments: justfile
.assignments
.iter()
.map(|(name, assignment)| (name.to_string(), Assignment::new(assignment)))
.collect(),
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Recipe {
pub aliases: Vec<String>,
pub dependencies: Vec<Dependency>,
pub lines: Vec<Line>,
pub private: bool,
pub quiet: bool,
pub shebang: bool,
pub parameters: Vec<Parameter>,
}
impl Recipe {
fn new(recipe: &full::Recipe, aliases: Vec<String>) -> Recipe {
Recipe {
private: recipe.private,
shebang: recipe.shebang,
quiet: recipe.quiet,
dependencies: recipe
.dependencies
.iter()
        .map(Dependency::new)
.collect(),
lines: recipe.body.iter().map(Line::new).collect(),
parameters: recipe.parameters.iter().map(Parameter::new).collect(),
aliases,
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Parameter {
pub kind: ParameterKind,
pub name: String,
pub default: Option<Expression>,
}
impl Parameter {
fn new(parameter: &full::Parameter) -> Parameter {
Parameter {
kind: ParameterKind::new(parameter.kind),
name: parameter.name.lexeme().to_owned(),
default: parameter.default.as_ref().map(Expression::new),
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum ParameterKind {
Singular,
Plus,
Star,
}
impl ParameterKind {
fn new(parameter_kind: full::ParameterKind) -> Self {
match parameter_kind {
full::ParameterKind::Singular => Self::Singular,
full::ParameterKind::Plus => Self::Plus,
full::ParameterKind::Star => Self::Star,
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Line {
pub fragments: Vec<Fragment>,
}
impl Line {
fn new(line: &full::Line) -> Line {
Line {
fragments: line.fragments.iter().map(Fragment::new).collect(),
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub enum Fragment {
Text { text: String },
Expression { expression: Expression },
}
impl Fragment {
fn new(fragment: &full::Fragment) -> Fragment {
match fragment {
full::Fragment::Text { token } => Fragment::Text {
text: token.lexeme().to_owned(),
},
full::Fragment::Interpolation { expression } => Fragment::Expression {
expression: Expression::new(expression),
},
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Assignment {
pub exported: bool,
pub expression: Expression,
}
impl Assignment {
fn new(assignment: &full::Assignment) -> Assignment {
Assignment {
exported: assignment.export,
expression: Expression::new(&assignment.value),
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub enum Expression {
Backtick {
command: String,
},
Call {
name: String,
arguments: Vec<Expression>,
},
Concatination {
lhs: Box<Expression>,
rhs: Box<Expression>,
},
String {
text: String,
},
Variable {
name: String,
},
}
impl Expression {
fn new(expression: &full::Expression) -> Expression {
use full::Expression::*;
match expression {
Backtick { contents, .. } => Expression::Backtick {
command: (*contents).to_owned(),
},
Call { thunk } => match thunk {
full::Thunk::Nullary { name, .. } => Expression::Call {
name: name.lexeme().to_owned(),
arguments: Vec::new(),
},
full::Thunk::Unary { name, arg, .. } => Expression::Call {
name: name.lexeme().to_owned(),
arguments: vec![Expression::new(arg)],
},
full::Thunk::Binary {
name, args: [a, b], ..
} => Expression::Call {
name: name.lexeme().to_owned(),
arguments: vec![Expression::new(a), Expression::new(b)],
},
},
Concatination { lhs, rhs } => Expression::Concatination {
lhs: Box::new(Expression::new(lhs)),
rhs: Box::new(Expression::new(rhs)),
},
StringLiteral { string_literal } => Expression::String {
text: string_literal.cooked.to_string(),
},
Variable { name, .. } => Expression::Variable {
name: name.lexeme().to_owned(),
},
Group { contents } => Expression::new(contents),
}
}
}
#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Debug, Clone)]
pub struct Dependency {
pub recipe: String,
pub arguments: Vec<Expression>,
}
impl Dependency {
fn new(dependency: &full::Dependency) -> Dependency {
Dependency {
recipe: dependency.recipe.name().to_owned(),
arguments: dependency.arguments.iter().map(Expression::new).collect(),
}
}
}
| 26.871094 | 95 | 0.616369 |
e9188d78c593bbbe3a72586918b98162ab7da641 | 713 | use hal::blocking::i2c;
use {Apds9960, Error, Register, DEV_ADDR};
impl<I2C, E> Apds9960<I2C>
where
I2C: i2c::WriteRead<Error = E>,
{
/// Read the device ID.
///
    /// This is `0xAB` by default.
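    ///
    /// A minimal usage sketch (assumes `sensor` is an initialized `Apds9960`):
    ///
    /// ```ignore
    /// let id = sensor.read_device_id()?;
    /// assert_eq!(id, 0xAB);
    /// ```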
pub fn read_device_id(&mut self) -> Result<u8, Error<E>> {
self.read_register(Register::ID)
}
pub(crate) fn read_register(&mut self, register: u8) -> Result<u8, Error<E>> {
let mut data = [0];
self.read_data(register, &mut data)?;
Ok(data[0])
}
pub(crate) fn read_data(&mut self, register: u8, data: &mut [u8]) -> Result<(), Error<E>> {
self.i2c
.write_read(DEV_ADDR, &[register], data)
.map_err(Error::I2C)
}
}
| 26.407407 | 95 | 0.568022 |
087dd75c6e7f93694302314cba9c77c50a6321c6 | 1,265 | fn main() {}
struct Solution;
/**
* Your MinStack object will be instantiated and called as such:
* let obj = MinStack::new();
* obj.push(val);
* obj.pop();
* let ret_3: i32 = obj.top();
* let ret_4: i32 = obj.get_min();
*/
struct MinStack {
data: Vec<i32>,
min_stack: Vec<i32>,
}
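// Design note: `min_stack` holds a non-strictly decreasing sequence of the
// minimums seen so far. Equal values are pushed as duplicates, so popping a
// repeated minimum from `data` keeps both stacks consistent.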
/**
* `&self` means the method takes an immutable reference.
* If you need a mutable reference, change it to `&mut self` instead.
*/
impl MinStack {
/** initialize your data structure here. */
fn new() -> Self {
Self {
data: vec![],
min_stack: vec![],
}
}
    fn push(&mut self, val: i32) {
        self.data.push(val);
        // Only track `val` as a minimum if the min stack is empty or
        // `val` is less than or equal to the current minimum.
        if let Some(&min) = self.min_stack.last() {
            if min < val {
                return;
            }
        }
        self.min_stack.push(val);
    }
fn pop(&mut self) {
if let Some(x) = self.data.pop() {
if let Some(&y) = self.min_stack.last() {
if y == x {
self.min_stack.pop();
}
}
}
}
    fn top(&self) -> i32 {
        *self.data.last().unwrap()
    }
    fn get_min(&self) -> i32 {
        *self.min_stack.last().unwrap()
    }
}
| 20.737705 | 69 | 0.490909 |
0e3d23713d524abed164d991f5187ec007465a7d | 15,291 | use parking_lot::RwLock;
use std::collections::HashSet;
use crate::class::*;
use crate::ctxt::{Fct, FctId, FctKind, FctParent, FctSrc, NodeMap, SemContext};
use crate::semck;
use crate::sym::Sym;
use crate::ty::BuiltinType;
use dora_parser::ast::visit::{self, Visitor};
use dora_parser::ast::{self, Ast};
use dora_parser::error::msg::Msg;
use dora_parser::interner::Name;
use dora_parser::lexer::position::Position;
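// Entry point for the class-definition pass: walks the AST once, registering
// fields, constructors and methods on each class, resolving type parameters
// and wiring up super classes.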
pub fn check<'ast>(ctxt: &mut SemContext<'ast>, ast: &'ast Ast, map_cls_defs: &NodeMap<ClassId>) {
let mut clsck = ClsCheck {
ctxt: ctxt,
ast: ast,
cls_id: None,
map_cls_defs: map_cls_defs,
};
clsck.check();
}
struct ClsCheck<'x, 'ast: 'x> {
ctxt: &'x mut SemContext<'ast>,
ast: &'ast ast::Ast,
map_cls_defs: &'x NodeMap<ClassId>,
cls_id: Option<ClassId>,
}
impl<'x, 'ast> ClsCheck<'x, 'ast> {
fn check(&mut self) {
self.visit_ast(self.ast);
}
fn add_field(&mut self, pos: Position, name: Name, ty: BuiltinType, reassignable: bool) {
let cls = self.ctxt.classes.idx(self.cls_id.unwrap());
let mut cls = cls.write();
for field in &cls.fields {
if field.name == name {
let name = self.ctxt.interner.str(name).to_string();
report(self.ctxt, pos, Msg::ShadowField(name));
}
}
let field = Field {
id: cls.fields.len().into(),
name: name,
ty: ty,
offset: 0,
reassignable: reassignable,
};
cls.fields.push(field);
}
}
impl<'x, 'ast> Visitor<'ast> for ClsCheck<'x, 'ast> {
fn visit_class(&mut self, c: &'ast ast::Class) {
self.cls_id = Some(*self.map_cls_defs.get(c.id).unwrap());
self.ctxt.sym.lock().push_level();
if let Some(ref type_params) = c.type_params {
if type_params.len() > 0 {
let mut names = HashSet::new();
let mut type_param_id = 0;
let cls = self.ctxt.classes.idx(self.cls_id.unwrap());
let mut cls = cls.write();
let mut params = Vec::new();
for type_param in type_params {
if !names.insert(type_param.name) {
let name = self.ctxt.interner.str(type_param.name).to_string();
let msg = Msg::TypeParamNameNotUnique(name);
self.ctxt
.diag
.lock()
.report_without_path(type_param.pos, msg);
}
params.push(BuiltinType::ClassTypeParam(cls.id, type_param_id.into()));
for bound in &type_param.bounds {
let ty = semck::read_type(self.ctxt, bound);
match ty {
Some(BuiltinType::Class(cls_id, _)) => {
if let None = cls.type_params[type_param_id].class_bound {
cls.type_params[type_param_id].class_bound = Some(cls_id);
} else {
let msg = Msg::MultipleClassBounds;
self.ctxt
.diag
.lock()
.report_without_path(type_param.pos, msg);
}
}
Some(BuiltinType::Trait(trait_id)) => {
if !cls.type_params[type_param_id].trait_bounds.insert(trait_id) {
let msg = Msg::DuplicateTraitBound;
self.ctxt
.diag
.lock()
.report_without_path(type_param.pos, msg);
}
}
None => {
// unknown type, error is already thrown
}
_ => {
let msg = Msg::BoundExpected;
self.ctxt.diag.lock().report_without_path(bound.pos(), msg);
}
}
}
let sym = Sym::SymClassTypeParam(cls.id, type_param_id.into());
self.ctxt.sym.lock().insert(type_param.name, sym);
type_param_id += 1;
}
let list_id = self.ctxt.lists.lock().insert(params.into());
cls.ty = BuiltinType::Class(cls.id, list_id);
} else {
let msg = Msg::TypeParamsExpected;
self.ctxt.diag.lock().report_without_path(c.pos, msg);
}
}
visit::walk_class(self, c);
if let Some(ref parent_class) = c.parent_class {
let name = self.ctxt.interner.str(parent_class.name).to_string();
let sym = self.ctxt.sym.lock().get(parent_class.name);
match sym {
Some(Sym::SymClass(clsid)) => {
let super_cls = self.ctxt.classes.idx(clsid);
let super_cls = super_cls.read();
if super_cls.has_open {
let cls = self.ctxt.classes.idx(self.cls_id.unwrap());
let mut cls = cls.write();
cls.parent_class = Some(clsid);
} else {
let msg = Msg::UnderivableType(name);
self.ctxt
.diag
.lock()
.report_without_path(parent_class.pos, msg);
}
let number_type_params = parent_class
.type_params
.as_ref()
.map(|x| x.len())
.unwrap_or(0);
if number_type_params != super_cls.type_params.len() {
let msg = Msg::WrongNumberTypeParams(
super_cls.type_params.len(),
number_type_params,
);
self.ctxt
.diag
.lock()
.report_without_path(parent_class.pos, msg);
}
}
_ => {
let msg = Msg::UnknownClass(name);
self.ctxt
.diag
.lock()
.report_without_path(parent_class.pos, msg);
}
};
} else {
let object_cls = self.ctxt.vips.object_class;
let cls_id = self.cls_id.unwrap();
if cls_id != object_cls {
let cls = self.ctxt.classes.idx(cls_id);
let mut cls = cls.write();
cls.parent_class = Some(object_cls);
}
}
self.cls_id = None;
self.ctxt.sym.lock().pop_level();
}
fn visit_field(&mut self, f: &'ast ast::Field) {
let ty = semck::read_type(self.ctxt, &f.data_type).unwrap_or(BuiltinType::Unit);
self.add_field(f.pos, f.name, ty, f.reassignable);
if !f.reassignable && !f.primary_ctor && f.expr.is_none() {
self.ctxt
.diag
.lock()
.report_without_path(f.pos, Msg::LetMissingInitialization);
}
}
fn visit_ctor(&mut self, f: &'ast ast::Function) {
let clsid = self.cls_id.unwrap();
let kind = if f.block.is_some() {
FctKind::Source(RwLock::new(FctSrc::new()))
} else {
FctKind::Definition
};
let fct = Fct {
id: FctId(0),
pos: f.pos,
ast: f,
name: f.name,
param_types: Vec::new(),
return_type: BuiltinType::Unit,
parent: FctParent::Class(clsid),
has_override: f.has_override,
has_open: f.has_open,
has_final: f.has_final,
is_pub: true,
is_extern: false,
is_static: false,
is_abstract: false,
internal: f.internal,
internal_resolved: false,
overrides: None,
throws: f.throws,
is_constructor: f.is_constructor,
vtable_index: None,
initialized: false,
impl_for: None,
type_params: Vec::new(),
kind: kind,
};
let fctid = self.ctxt.add_fct(fct);
let cls = self.ctxt.classes.idx(self.cls_id.unwrap());
let mut cls = cls.write();
cls.constructor = Some(fctid);
}
fn visit_method(&mut self, f: &'ast ast::Function) {
if self.cls_id.is_none() {
return;
}
let kind = if f.block.is_some() {
FctKind::Source(RwLock::new(FctSrc::new()))
} else {
FctKind::Definition
};
let fct = Fct {
id: FctId(0),
ast: f,
pos: f.pos,
is_extern: false,
name: f.name,
param_types: Vec::new(),
return_type: BuiltinType::Unit,
parent: FctParent::Class(self.cls_id.unwrap()),
has_override: f.has_override,
            // `abstract` for methods also implies that the method is open to
            // override
has_open: f.has_open || f.is_abstract,
has_final: f.has_final,
is_pub: f.is_pub,
is_static: f.is_static,
is_abstract: f.is_abstract,
internal: f.internal,
internal_resolved: false,
overrides: None,
throws: f.throws,
is_constructor: false,
vtable_index: None,
initialized: false,
impl_for: None,
type_params: Vec::new(),
kind: kind,
};
let fctid = self.ctxt.add_fct(fct);
let cls = self.ctxt.classes.idx(self.cls_id.unwrap());
let mut cls = cls.write();
cls.methods.push(fctid);
}
}
fn report(ctxt: &SemContext, pos: Position, msg: Msg) {
ctxt.diag.lock().report_without_path(pos, msg);
}
#[cfg(test)]
mod tests {
use crate::semck::tests::*;
use dora_parser::error::msg::Msg;
#[test]
fn test_multiple_definition() {
err(
"class Foo class Foo",
pos(1, 11),
Msg::ShadowClass("Foo".into()),
);
}
#[test]
fn test_class_and_function() {
err(
"fun Foo() {} class Foo",
pos(1, 14),
Msg::ShadowFunction("Foo".into()),
);
err(
"class Foo fun Foo() {}",
pos(1, 11),
Msg::ShadowClass("Foo".into()),
);
}
#[test]
fn test_class_definition() {
ok("class Foo");
ok("class Foo()");
ok("class Foo(let a: Int)");
ok("class Foo(let a: Int, let b:Int)");
ok("class Foo(let a: Foo)");
ok("class Foo(let a: Bar) class Bar");
err(
"class Foo(let a: Unknown)",
pos(1, 18),
Msg::UnknownType("Unknown".into()),
);
err(
"class Foo(let a: Int, let a: Int)",
pos(1, 27),
Msg::ShadowField("a".to_string()),
);
}
#[test]
fn class_with_unknown_super_class() {
err("class B : A {}", pos(1, 11), Msg::UnknownClass("A".into()));
err(
"open class B : A {}",
pos(1, 16),
Msg::UnknownClass("A".into()),
);
err(
"class B : Int {}",
pos(1, 11),
Msg::UnderivableType("Int".into()),
);
}
#[test]
fn class_with_open_modifier() {
ok("open class A {}");
ok("open class A {} class B : A {}");
err(
"class A {} class B : A {}",
pos(1, 22),
Msg::UnderivableType("A".into()),
);
}
#[test]
fn non_field_ctor_arguments() {
ok("class Foo(a: Int, b: Int)");
ok("class Foo(let a: Int, b: Int)");
ok("class Foo(a: Int, var b: Int)");
err(
"class Foo(a: Int, a: Int)",
pos(1, 1),
Msg::ShadowParam("a".into()),
);
err(
"class Foo(a: Int, let a: Int)",
pos(1, 1),
Msg::ShadowParam("a".into()),
);
err(
"class Foo(let a: Int, a: Int)",
pos(1, 1),
Msg::ShadowParam("a".into()),
);
err(
"class Foo(a: Int) fun f(x: Foo) { x.a = 1; }",
pos(1, 36),
Msg::UnknownField("a".into(), "Foo".into()),
);
ok("class Foo(a: Int) fun foo() -> Foo { return Foo(1); } ");
}
#[test]
fn field_defined_twice() {
err(
"class Foo { var a: Int; var a: Int; }",
pos(1, 25),
Msg::ShadowField("a".into()),
);
err(
"class Foo(let a: Int) { var a: Int; }",
pos(1, 25),
Msg::ShadowField("a".into()),
);
}
#[test]
fn let_field_without_initialization() {
err(
"class Foo { let a: Int; }",
pos(1, 13),
Msg::LetMissingInitialization,
);
}
#[test]
fn field_self_assignment() {
err(
"class Foo(a: Int) { var b: Int = b; }",
pos(1, 34),
Msg::UnknownIdentifier("b".into()),
);
}
#[test]
fn test_generic_class() {
ok("class A<T>");
ok("class A<X, Y>");
err(
"class A<T, T>",
pos(1, 12),
Msg::TypeParamNameNotUnique("T".into()),
);
err("class A<>", pos(1, 1), Msg::TypeParamsExpected);
}
#[test]
fn test_generic_argument() {
ok("class A<T>(val: T)");
ok("class A<T>(var val: T)");
ok("class A<T>(let val: T)");
}
#[test]
fn test_generic_bound() {
err(
"class A<T: Foo>",
pos(1, 12),
Msg::UnknownType("Foo".into()),
);
ok("class Foo class A<T: Foo>");
ok("trait Foo {} class A<T: Foo>");
}
#[test]
fn test_generic_multiple_class_bounds() {
err(
"class Foo class Bar
class A<T: Foo + Bar>",
pos(2, 21),
Msg::MultipleClassBounds,
);
}
#[test]
fn test_duplicate_trait_bound() {
err(
"trait Foo {}
class A<T: Foo + Foo>",
pos(2, 21),
Msg::DuplicateTraitBound,
);
}
#[test]
fn test_super_class_with_superfluous_type_params() {
err(
"
open class A
class B: A<Int> {}",
pos(3, 22),
Msg::WrongNumberTypeParams(0, 1),
);
}
}
| 30.041257 | 98 | 0.443398 |
4b2bf44c29cb361856cb3f0c21270b693724e173 | 19,123 | use std::rc::Rc;
use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
use super::mesh_pass::MeshPass;
#[derive(Copy, Clone)]
pub enum MaterialKind {
Untextured,
TexturedUnlit,
Textured,
TexturedNorm,
TexturedNormMat,
TexturedEmissive,
}
#[derive(Clone)]
pub struct MaterialData {
pub factors: MaterialFactors,
pub lighting: bool,
pub texture: Option<Rc<wgpu::Texture>>,
pub normal: Option<Rc<wgpu::Texture>>,
pub metallic_roughness: Option<Rc<wgpu::Texture>>,
pub ao: Option<Rc<wgpu::Texture>>,
pub emissive: Option<Rc<wgpu::Texture>>,
}
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct MaterialFactors {
pub diffuse: [f32; 4],
pub metal: f32,
pub rough: f32,
pub emissive: [f32; 3],
pub extra_emissive: [f32; 3],
}
pub struct Material {
pub factors: MaterialFactors,
kind: MaterialKind,
factors_buf: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
impl Material {
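    /// Selects the pipeline variant for this material from the texture slots
    /// present in `data`: emissive > normal + metallic/roughness > normal >
    /// base texture > untextured, with a separate unlit path when
    /// `data.lighting` is false. A missing companion map for the chosen
    /// variant is treated as a programming error (`expect`).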
pub fn new(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
data: &MaterialData,
) -> Self {
if data.lighting {
if let Some(ref texture) = data.texture {
if let Some(ref emissive) = data.emissive {
let normal = data.normal.as_ref().expect("textured model without normal map");
let metallic_roughness = data.metallic_roughness.as_ref().expect("textured model without metallic roughness map");
let ao = data.ao.as_ref().expect("textured model without ao map");
Material::textured_emissive(
device, mesh_pass,
data.factors.into(),
&texture,
normal,
metallic_roughness,
ao,
&emissive
)
} else if let Some(ref normal) = data.normal {
if let Some(metallic_roughness) = data.metallic_roughness.as_ref() {
let ao = data.ao.as_ref().expect("textured model without ao map");
Material::textured_norm_mat(
device, mesh_pass,
data.factors.into(),
&texture,
&normal,
metallic_roughness,
ao,
)
} else {
Material::textured_norm(
device, mesh_pass,
data.factors.into(),
&texture,
&normal,
)
}
} else {
Material::textured(
device, mesh_pass,
data.factors.into(),
&texture,
)
}
} else {
Material::untextured(
device, mesh_pass,
data.factors.into(),
)
}
} else {
if let Some(ref texture) = data.texture {
Material::textured_unlit(
device, mesh_pass,
data.factors.into(),
&texture,
)
} else {
Material::untextured(
device, mesh_pass,
data.factors.into(),
)
}
}
}
pub fn upload_factors_to_gpu(
&self,
device: &mut wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
) {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(self.factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_SRC,
});
encoder.copy_buffer_to_buffer(
&factors_buf, 0, &self.factors_buf, 0,
std::mem::size_of::<MaterialFactorsUpload>() as wgpu::BufferAddress,
);
}
fn textured_unlit(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
texture: &wgpu::Texture,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let texture_view = texture.create_view(&Default::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: None,
..Default::default()
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.textured_unlit.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
],
});
Material {
factors_buf,
kind: MaterialKind::TexturedUnlit,
bind_group,
factors,
}
}
fn textured(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
texture: &wgpu::Texture,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let texture_view = texture.create_view(&Default::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: None,
..Default::default()
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.textured.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
],
});
Material {
factors_buf,
kind: MaterialKind::Textured,
bind_group,
factors,
}
}
fn textured_norm(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
texture: &wgpu::Texture,
normal_texture: &wgpu::Texture,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let texture_view = texture.create_view(&Default::default());
let normal_map_view = normal_texture.create_view(&Default::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: None,
..Default::default()
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.textured_norm.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::TextureView(&normal_map_view),
},
],
});
Material {
factors_buf,
kind: MaterialKind::TexturedNorm,
bind_group,
factors,
}
}
fn textured_norm_mat(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
texture: &wgpu::Texture,
normal_texture: &wgpu::Texture,
metallic_roughness_texture: &wgpu::Texture,
ao_texture: &wgpu::Texture,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let texture_view = texture.create_view(&Default::default());
let normal_map_view = normal_texture.create_view(&Default::default());
let metallic_roughness_map_view = metallic_roughness_texture.create_view(&Default::default());
let ao_map_view = ao_texture.create_view(&Default::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: None,
..Default::default()
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.textured_norm_mat.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::TextureView(&normal_map_view),
},
wgpu::BindGroupEntry {
binding: 4,
resource: wgpu::BindingResource::TextureView(&metallic_roughness_map_view),
},
wgpu::BindGroupEntry {
binding: 5,
resource: wgpu::BindingResource::TextureView(&ao_map_view),
},
],
});
Material {
factors_buf,
kind: MaterialKind::TexturedNormMat,
bind_group,
factors,
}
}
fn textured_emissive(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
texture: &wgpu::Texture,
normal_texture: &wgpu::Texture,
metallic_roughness_texture: &wgpu::Texture,
ao_texture: &wgpu::Texture,
emissive_texture: &wgpu::Texture,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let texture_view = texture.create_view(&Default::default());
let normal_map_view = normal_texture.create_view(&Default::default());
let metallic_roughness_map_view = metallic_roughness_texture.create_view(&Default::default());
let ao_map_view = ao_texture.create_view(&Default::default());
let emissive_map_view = emissive_texture.create_view(&Default::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare: None,
..Default::default()
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.textured_emissive.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::TextureView(&normal_map_view),
},
wgpu::BindGroupEntry {
binding: 4,
resource: wgpu::BindingResource::TextureView(&metallic_roughness_map_view),
},
wgpu::BindGroupEntry {
binding: 5,
resource: wgpu::BindingResource::TextureView(&ao_map_view),
},
wgpu::BindGroupEntry {
binding: 6,
resource: wgpu::BindingResource::TextureView(&emissive_map_view),
},
],
});
Material {
factors,
kind: MaterialKind::TexturedEmissive,
factors_buf,
bind_group,
}
}
fn untextured(
device: &mut wgpu::Device,
mesh_pass: &MeshPass,
factors: MaterialFactors,
) -> Self {
let factors_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[MaterialFactorsUpload::from(factors)]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &mesh_pass.untextured.part_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(factors_buf.as_entire_buffer_binding()),
},
],
});
Material {
factors,
kind: MaterialKind::Untextured,
factors_buf,
bind_group,
}
}
pub fn kind(&self) -> MaterialKind { self.kind }
pub fn factors_buf(&self) -> &wgpu::Buffer { &self.factors_buf }
pub fn bind_group(&self) -> &wgpu::BindGroup { &self.bind_group }
}
////////////////////////////////////////////////////////////////////////////////
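// GPU-side layout of `MaterialFactors`. Uniform-buffer alignment rules
// (std140-style) round scalars and vec3s up to 16-byte slots, hence the
// explicit `padN` words that keep this `#[repr(C)]` struct byte-compatible
// with the shader-side definition.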
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct MaterialFactorsUpload {
pub diffuse: [f32; 4],
pub metal: f32,
pub pad0: [u32; 3],
pub rough: f32,
pub pad1: [u32; 3],
pub emissive: [f32; 3],
pub pad2: [u32; 1],
pub extra_emissive: [f32; 3],
pub pad3: [u32; 1],
}
unsafe impl Pod for MaterialFactorsUpload { }
unsafe impl Zeroable for MaterialFactorsUpload { }
impl Default for MaterialFactors {
fn default() -> Self {
MaterialFactors {
diffuse: [1.0, 1.0, 1.0, 1.0],
metal: 1.0,
rough: 1.0,
emissive: [1.0, 1.0, 1.0],
extra_emissive: [0.0, 0.0, 0.0],
}
}
}
impl From<MaterialFactors> for MaterialFactorsUpload {
fn from(v: MaterialFactors) -> Self {
MaterialFactorsUpload {
diffuse: v.diffuse,
metal: v.metal,
rough: v.rough,
emissive: v.emissive,
extra_emissive: v.extra_emissive,
pad0: [0; 3],
pad1: [0; 3],
pad2: [0; 1],
pad3: [0; 1],
}
}
}
| 35.088073 | 134 | 0.52612 |
5b0f2cfdfdac58f85a439de156fb1d32ab4f7d35 | 766 | #![cfg_attr(not(feature = "std"), feature(lang_items, start))]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg_attr(not(feature = "std"), start)]
fn start(_argc: isize, _argv: *const *const u8) -> isize {
0
}
#[lang = "eh_personality"]
#[no_mangle]
#[cfg(not(feature = "std"))]
pub extern "C" fn rust_eh_personality() {}
#[panic_handler]
#[cfg(not(feature = "std"))]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe {
libc::abort();
}
}
use displaydoc::Display;
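// `displaydoc` derives `core::fmt::Display` from the doc comments below,
// so e.g. `TestType::Variant1` displays as "This one is okay".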
/// this type is pretty swell
#[derive(Display)]
enum TestType {
/// This one is okay
Variant1,
/// Multi
/// line
/// doc.
Variant2,
}
static_assertions::assert_impl_all!(label; TestType, core::fmt::Display);
#[cfg(feature = "std")]
fn main() {}
| 20.157895 | 73 | 0.612272 |
ef7c9e352698ff0ddd4c151b72d918c999a6d1cb | 106 | mod stdin_input;
mod stdout_output;
pub use stdin_input::StdinInput;
pub use stdout_output::StdoutOutput; | 21.2 | 36 | 0.830189 |
56b9ac4241771e314ad871e0f9e2280ddf0ad584 | 750 | // This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
extern crate lightning;
use lightning::ln::msgs;
mod utils;
use utils::VecWriter;
#[inline]
pub fn do_test(data: &[u8]) {
test_msg!(msgs::UpdateFee, data);
}
#[cfg(feature = "afl")]
#[macro_use] extern crate afl;
#[cfg(feature = "afl")]
fn main() {
fuzz!(|data| {
do_test(data);
});
}
#[cfg(feature = "honggfuzz")]
#[macro_use] extern crate honggfuzz;
#[cfg(feature = "honggfuzz")]
fn main() {
loop {
fuzz!(|data| {
do_test(data);
});
}
}
extern crate hex;
#[cfg(test)]
mod tests {
#[test]
fn duplicate_crash() {
super::do_test(&::hex::decode("00").unwrap());
}
}
| 17.045455 | 80 | 0.653333 |
bfa9b8a2a06ae927eb012e681632056cd52fc7b9 | 434 | use crate::domain::domain::SysUser;
use crate::domain::vo::SysRoleVO;
use serde::{Deserialize, Serialize};
/// Sign-in result data (login payload).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SignInVO {
pub user: Option<SysUser>,
pub permissions: Vec<String>,
pub access_token: String,
pub role: Option<SysRoleVO>,
}
impl ToString for SignInVO {
fn to_string(&self) -> String {
serde_json::json!(self).to_string()
}
}
| 22.842105 | 47 | 0.677419 |
267050a46fea04d504277a36e6f7e393d0783fc1 | 2,849 | use crate::metadata::Metadata;
use crate::review::KyokuReview;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::io::prelude::*;
use anyhow::{Context, Result};
use convlog::tenhou::RawPartialLog;
use once_cell::sync::Lazy;
use serde::Serialize;
use serde_json as json;
use tera;
use tera::{Tera, Value};
static TEMPLATES: Lazy<Tera> = Lazy::new(|| {
let mut tera = Tera::default();
tera.register_function("kyoku_to_string", kyoku_to_string);
tera.register_function("pretty_round", pretty_round);
tera.add_raw_templates(vec![
("macros.html", include_str!("../templates/macros.html")),
("pai.svg", include_str!("../assets/pai.svg")),
("report.html", include_str!("../templates/report.html")),
])
.expect("failed to parse template");
tera
});
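// Tera helper registered above: formats a kyoku index (0-15) plus honba
// count as Japanese round notation, e.g. kyoku=4, honba=1 -> "南一局 1 本場".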
fn kyoku_to_string(args: &HashMap<String, Value>) -> tera::Result<Value> {
const BAKAZE_KANJI: &[&str] = &["東", "南", "西", "北"];
const NUM_KANJI: &[&str] = &["一", "二", "三", "四"];
let kyoku = if let Some(Value::Number(num)) = args.get("kyoku") {
usize::try_from(num.as_u64().unwrap_or(0)).unwrap_or(0)
} else {
0
};
let honba = if let Some(Value::Number(num)) = args.get("honba") {
usize::try_from(num.as_u64().unwrap_or(0)).unwrap_or(0)
} else {
0
};
let ret = BAKAZE_KANJI[kyoku / 4].to_owned() + NUM_KANJI[kyoku % 4] + "局";
if honba == 0 {
Ok(Value::String(ret))
} else {
Ok(Value::String(ret + " " + &honba.to_string() + " 本場"))
}
}
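// Tera helper: rounds `num` to four decimal places and renders it with a
// fixed four-digit fraction, e.g. 0.123456 -> "0.1235".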
fn pretty_round(args: &HashMap<String, Value>) -> tera::Result<Value> {
if let Some(Value::Number(num)) = args.get("num") {
if let Some(f) = num.as_f64() {
let n = (f * 1e4).round() / 1e4;
let s = format!("{:.04}", n);
return Ok(Value::String(s));
}
}
Ok(Value::Null)
}
#[derive(Serialize)]
struct View<'a> {
kyokus: &'a [KyokuReview],
target_actor: u8,
#[serde(skip_serializing_if = "Option::is_none")]
splited_logs: Option<&'a [RawPartialLog<'a>]>,
metadata: &'a Metadata<'a>,
}
pub fn render<'a, W>(
w: &mut W,
reviews: &[KyokuReview],
target_actor: u8,
metadata: &Metadata,
splited_logs: Option<&'a [RawPartialLog<'a>]>,
) -> Result<()>
where
W: Write,
{
let view = View {
kyokus: reviews,
target_actor,
splited_logs,
metadata,
};
let ctx = tera::Context::from_serialize(&view)?;
let result =
TEMPLATES
.render("report.html", &ctx)
.with_context(|| match json::to_string(&view) {
Ok(json_string) => format!("with values: {}", json_string),
Err(err) => format!("even serializations failed: {}", err),
})?;
w.write_all(&result.as_bytes())?;
Ok(())
}
| 26.626168 | 78 | 0.571429 |
398a8a1f144ab09492367c5cd68cef0d414099c1 | 5,020 | #[doc = "Register `DEVICEADDRTYPE` reader"]
pub struct R(crate::R<DEVICEADDRTYPE_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<DEVICEADDRTYPE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DEVICEADDRTYPE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DEVICEADDRTYPE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `DEVICEADDRTYPE` writer"]
pub struct W(crate::W<DEVICEADDRTYPE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<DEVICEADDRTYPE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<DEVICEADDRTYPE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<DEVICEADDRTYPE_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Device address type.\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DEVICEADDRTYPE_A {
#[doc = "0: Public address."]
PUBLIC = 0,
#[doc = "1: Random address."]
RANDOM = 1,
}
impl From<DEVICEADDRTYPE_A> for bool {
#[inline(always)]
fn from(variant: DEVICEADDRTYPE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `DEVICEADDRTYPE` reader - Device address type."]
pub struct DEVICEADDRTYPE_R(crate::FieldReader<bool, DEVICEADDRTYPE_A>);
impl DEVICEADDRTYPE_R {
pub(crate) fn new(bits: bool) -> Self {
DEVICEADDRTYPE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DEVICEADDRTYPE_A {
match self.bits {
false => DEVICEADDRTYPE_A::PUBLIC,
true => DEVICEADDRTYPE_A::RANDOM,
}
}
#[doc = "Checks if the value of the field is `PUBLIC`"]
#[inline(always)]
pub fn is_public(&self) -> bool {
**self == DEVICEADDRTYPE_A::PUBLIC
}
#[doc = "Checks if the value of the field is `RANDOM`"]
#[inline(always)]
pub fn is_random(&self) -> bool {
**self == DEVICEADDRTYPE_A::RANDOM
}
}
impl core::ops::Deref for DEVICEADDRTYPE_R {
type Target = crate::FieldReader<bool, DEVICEADDRTYPE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DEVICEADDRTYPE` writer - Device address type."]
pub struct DEVICEADDRTYPE_W<'a> {
w: &'a mut W,
}
impl<'a> DEVICEADDRTYPE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DEVICEADDRTYPE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Public address."]
#[inline(always)]
pub fn public(self) -> &'a mut W {
self.variant(DEVICEADDRTYPE_A::PUBLIC)
}
#[doc = "Random address."]
#[inline(always)]
pub fn random(self) -> &'a mut W {
self.variant(DEVICEADDRTYPE_A::RANDOM)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Device address type."]
#[inline(always)]
pub fn deviceaddrtype(&self) -> DEVICEADDRTYPE_R {
DEVICEADDRTYPE_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Device address type."]
#[inline(always)]
pub fn deviceaddrtype(&mut self) -> DEVICEADDRTYPE_W {
DEVICEADDRTYPE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Device address type.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [deviceaddrtype](index.html) module"]
pub struct DEVICEADDRTYPE_SPEC;
impl crate::RegisterSpec for DEVICEADDRTYPE_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [deviceaddrtype::R](R) reader structure"]
impl crate::Readable for DEVICEADDRTYPE_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [deviceaddrtype::W](W) writer structure"]
impl crate::Writable for DEVICEADDRTYPE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets DEVICEADDRTYPE to value 0xffff_ffff"]
impl crate::Resettable for DEVICEADDRTYPE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0xffff_ffff
}
}
| 31.375 | 415 | 0.611952 |
91b24799d47a9eac057677ceb87662c467c3dc72 | 77 | rustler::atoms! {
ok,
error,
invalid_signature,
invalid_key
} | 12.833333 | 22 | 0.61039 |
339aee67f0eaaa764e5e8ce6b507ff9dd1cbb5fd | 14,833 | //! Configuring the system clock sources.
//! You will typically need to create an instance of `GenericClockController`
//! before you can set up most of the peripherals on the atsamd51 device.
//! The other types in this module are used to enforce at compile time
//! that the peripherals have been correctly configured.
use crate::target_device::gclk::genctrl::SRC_A::*;
use crate::target_device::gclk::pchctrl::GEN_A::*;
use crate::target_device::{self, GCLK, MCLK, NVMCTRL, OSC32KCTRL, OSCCTRL};
use crate::time::{Hertz, MegaHertz};
pub type ClockGenId = target_device::gclk::pchctrl::GEN_A;
pub type ClockSource = target_device::gclk::genctrl::SRC_A;
#[allow(bad_style)]
pub enum ClockId {
DFLL48 = 0,
FDPLL0,
FDPLL1,
SLOW_32K,
EIC,
FREQM_MSR,
FREQM_REF,
SERCOM0_CORE,
SERCOM1_CORE,
TC0_TC1,
USB,
EVSYS0,
EVSYS1,
EVSYS2,
EVSYS3,
EVSYS4,
EVSYS5,
EVSYS6,
EVSYS7,
EVSYS8,
EVSYS9,
EVSYS10,
EVSYS11,
SERCOM2_CORE,
SERCOM3_CORE,
TCC0_TCC1,
TC2_TC3,
CAN0,
CAN1,
TCC2_TCC3,
TC4_TC5,
PDEC,
AC,
CCL,
SERCOM4_CORE,
SERCOM5_CORE,
SERCOM6_CORE,
SERCOM7_CORE,
TCC4,
TC6_TC7,
ADC0,
ADC1,
DAC,
I2S0,
I2S1,
SDHC0,
SDHC1,
CM4_TRACE,
}
impl From<ClockId> for u8 {
fn from(clock: ClockId) -> u8 {
clock as u8
}
}
/// Represents a configured clock generator.
/// Can be converted into the effective clock frequency.
/// Its primary purpose is to be passed in to methods
/// such as `GenericClockController::tcc2_tc3` to configure
/// the clock for a peripheral.
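///
/// For example (a sketch, assuming `clocks` is a configured controller):
/// `let gclk0 = clocks.gclk0(); let tc23 = clocks.tc2_tc3(&gclk0);`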
//#[derive(Clone, Copy)]
pub struct GClock {
gclk: ClockGenId,
freq: Hertz,
}
impl Into<Hertz> for GClock {
fn into(self) -> Hertz {
self.freq
}
}
struct State {
gclk: GCLK,
}
impl State {
fn reset_gclk(&mut self) {
self.gclk.ctrla.write(|w| w.swrst().set_bit());
while self.gclk.ctrla.read().swrst().bit_is_set() || self.gclk.syncbusy.read().bits() != 0 {
}
}
fn wait_for_sync(&mut self) {
while self.gclk.syncbusy.read().bits() != 0 {}
}
fn set_gclk_divider_and_source(
&mut self,
gclk: ClockGenId,
divider: u16,
src: ClockSource,
improve_duty_cycle: bool,
) {
self.gclk.genctrl[u8::from(gclk) as usize].write(|w| unsafe {
w.src().variant(src);
w.div().bits(divider);
// divide directly by divider, rather than 2^(n+1)
w.divsel().clear_bit();
w.idc().bit(improve_duty_cycle);
w.genen().set_bit();
w.oe().set_bit()
});
self.wait_for_sync();
}
fn enable_clock_generator(&mut self, clock: ClockId, generator: ClockGenId) {
self.gclk.pchctrl[u8::from(clock) as usize].write(|w| unsafe {
w.gen().bits(generator.into());
w.chen().set_bit()
});
self.wait_for_sync();
}
}
/// `GenericClockController` encapsulates the GCLK hardware.
/// It provides a type safe way to configure the system clocks.
/// Initializing the `GenericClockController` instance configures
/// the system to run at 120MHz by taking the DFLL48
/// and feeding it into the DPLL0 hardware which multiplies the
/// signal by 2.5x.
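///
/// # Example
///
/// A minimal sketch, assuming `p` is the taken device `Peripherals`:
///
/// ```ignore
/// let mut clocks = GenericClockController::with_internal_32kosc(
///     p.GCLK,
///     &mut p.MCLK,
///     &mut p.OSC32KCTRL,
///     &mut p.OSCCTRL,
///     &mut p.NVMCTRL,
/// );
/// let gclk0 = clocks.gclk0(); // 120MHz system clock
/// ```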
pub struct GenericClockController {
state: State,
gclks: [Hertz; 12],
used_clocks: u64,
}
impl GenericClockController {
/// Reset the clock controller, configure the system to run
    /// at 120MHz and reset various clock dividers.
pub fn with_internal_32kosc(
gclk: GCLK,
mclk: &mut MCLK,
osc32kctrl: &mut OSC32KCTRL,
oscctrl: &mut OSCCTRL,
nvmctrl: &mut NVMCTRL,
) -> Self {
Self::new(gclk, mclk, osc32kctrl, oscctrl, nvmctrl, false)
}
/// Reset the clock controller, configure the system to run
    /// at 120MHz and reset various clock dividers.
pub fn with_external_32kosc(
gclk: GCLK,
mclk: &mut MCLK,
osc32kctrl: &mut OSC32KCTRL,
oscctrl: &mut OSCCTRL,
nvmctrl: &mut NVMCTRL,
) -> Self {
Self::new(gclk, mclk, osc32kctrl, oscctrl, nvmctrl, true)
}
fn new(
gclk: GCLK,
mclk: &mut MCLK,
osc32kctrl: &mut OSC32KCTRL,
oscctrl: &mut OSCCTRL,
nvmctrl: &mut NVMCTRL,
use_external_crystal: bool,
) -> Self {
let mut state = State { gclk };
set_flash_to_half_auto_wait_state(nvmctrl);
enable_gclk_apb(mclk);
if use_external_crystal {
enable_external_32kosc(osc32kctrl);
state.reset_gclk();
state.set_gclk_divider_and_source(GCLK1, 1, XOSC32K, false);
} else {
enable_internal_32kosc(osc32kctrl);
state.reset_gclk();
state.set_gclk_divider_and_source(GCLK1, 1, OSCULP32K, false);
}
while state.gclk.syncbusy.read().genctrl0().is_gclk0() {}
#[cfg(feature = "usb")]
configure_usb_correction(oscctrl);
// GCLK5 set to 2MHz
unsafe {
state.gclk.genctrl[5].write(|w| {
w.src().dfll();
w.genen().set_bit();
w.div().bits(24)
});
}
while state.gclk.syncbusy.read().genctrl5().is_gclk5() {}
configure_and_enable_dpll0(oscctrl, &mut state.gclk);
wait_for_dpllrdy(oscctrl);
unsafe {
// GCLK0 set to DPLL0 (120MHz)
state.gclk.genctrl[0].write(|w| {
w.src().dpll0();
w.div().bits(1);
w.oe().set_bit();
w.genen().set_bit()
});
}
while state.gclk.syncbusy.read().genctrl0().is_gclk0() {}
mclk.cpudiv.write(|w| w.div().div1());
Self {
state,
gclks: [
OSC120M_FREQ,
OSC32K_FREQ,
Hertz(0),
Hertz(0),
Hertz(0),
MegaHertz(2).into(),
Hertz(0),
Hertz(0),
Hertz(0),
Hertz(0),
Hertz(0),
Hertz(0),
],
used_clocks: 1u64 << u8::from(ClockId::FDPLL0),
}
}
/// Returns a `GClock` for gclk0, the 120MHz oscillator.
pub fn gclk0(&mut self) -> GClock {
GClock {
gclk: GCLK0,
freq: self.gclks[0],
}
}
/// Returns a `GClock` for gclk1, the 32KHz oscillator.
pub fn gclk1(&mut self) -> GClock {
GClock {
gclk: GCLK1,
freq: self.gclks[1],
}
}
/// Returns the `GClock` for the specified clock generator.
/// If that clock generator has not yet been configured,
/// returns None.
pub fn get_gclk(&mut self, gclk: ClockGenId) -> Option<GClock> {
let idx = u8::from(gclk) as usize;
if self.gclks[idx].0 == 0 {
None
} else {
Some(GClock {
gclk,
freq: self.gclks[idx],
})
}
}
/// Configures a clock generator with the specified divider and
/// source.
/// `divider` is a linear divider to be applied to the clock
/// source. While the hardware also supports an exponential divider,
/// this function doesn't expose that functionality at this time.
/// `improve_duty_cycle` is a boolean that, when set to true, enables
/// a 50/50 duty cycle for odd divider values.
/// Returns a `GClock` for the configured clock generator.
/// Returns `None` if the clock generator has already been configured.
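    ///
    /// # Example
    ///
    /// A sketch deriving a hypothetical 1MHz clock on generator 2 from the
    /// 48MHz DFLL (divider 48):
    ///
    /// ```ignore
    /// let gclk2 = clocks
    ///     .configure_gclk_divider_and_source(GCLK2, 48, DFLL, false)
    ///     .unwrap();
    /// ```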
pub fn configure_gclk_divider_and_source(
&mut self,
gclk: ClockGenId,
divider: u16,
src: ClockSource,
improve_duty_cycle: bool,
) -> Option<GClock> {
let idx = u8::from(gclk) as usize;
if self.gclks[idx].0 != 0 {
return None;
}
self.state
.set_gclk_divider_and_source(gclk, divider, src, improve_duty_cycle);
let freq: Hertz = match src {
XOSC32K | OSCULP32K => OSC32K_FREQ,
GCLKGEN1 => self.gclks[1],
DFLL => OSC48M_FREQ,
DPLL0 => OSC120M_FREQ,
XOSC0 | XOSC1 | GCLKIN | DPLL1 => unimplemented!(),
};
self.gclks[idx] = Hertz(freq.0 / divider as u32);
Some(GClock { gclk, freq })
}
}
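// For each (method, token type, clock id) triple listed below, this macro
// expands to a typed clock token struct plus a `GenericClockController`
// method that connects the peripheral channel to a chosen generator and
// returns the token (or `None` if that channel was already claimed).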
macro_rules! clock_generator {
($(($id:ident, $Type:ident, $clock:ident),)+) => {
$(
/// A typed token that indicates that the clock for the peripheral(s)
/// with the matching name has been configured.
/// The effective clock frequency is available via the `freq` method,
/// or by converting the object into a `Hertz` instance.
/// The peripheral initialization code will typically require passing
/// in this object to prove at compile time that the clock has been
/// correctly initialized.
#[derive(Debug)]
pub struct $Type {
freq: Hertz,
}
impl $Type {
/// Returns the frequency of the configured clock
pub fn freq(&self) -> Hertz {
self.freq
}
}
impl Into<Hertz> for $Type {
fn into(self) -> Hertz {
self.freq
}
}
)+
impl GenericClockController {
$(
/// Configure the clock for peripheral(s) that match the name
/// of this function to use the specific clock generator.
/// The `GClock` parameter may be one of default clocks
/// return from `gclk0()`, `gclk1()` or a clock configured
/// by the host application using the `configure_gclk_divider_and_source`
/// method.
/// Returns a typed token that proves that the clock has been configured;
/// the peripheral initialization code will typically require that this
/// clock token be passed in to ensure that the clock has been initialized
/// appropriately.
            /// Returns `None` if the specified generic clock has already been
/// configured.
pub fn $id(&mut self, generator: &GClock) -> Option<$Type> {
let bits: u64 = 1<<u8::from(ClockId::$clock) as u64;
if (self.used_clocks & bits) != 0 {
return None;
}
self.used_clocks |= bits;
self.state.enable_clock_generator(ClockId::$clock, generator.gclk);
let freq = self.gclks[u8::from(generator.gclk) as usize];
Some($Type{freq})
}
)+
}
}
}
clock_generator!(
(tc0_tc1, Tc0Tc1Clock, TC0_TC1),
(tcc0_tcc1, Tcc0Tcc1Clock, TCC0_TCC1),
(tc2_tc3, Tc2Tc3Clock, TC2_TC3),
(tcc2_tcc3, Tcc2Tcc3Clock, TCC2_TCC3),
(tc4_tc5, Tc4Tc5Clock, TC4_TC5),
(tcc4, Tcc4Clock, TCC4),
(tc6_tc7, Tc6Tc7Clock, TC6_TC7),
(sercom0_core, Sercom0CoreClock, SERCOM0_CORE),
(sercom1_core, Sercom1CoreClock, SERCOM1_CORE),
(sercom2_core, Sercom2CoreClock, SERCOM2_CORE),
(sercom3_core, Sercom3CoreClock, SERCOM3_CORE),
(sercom4_core, Sercom4CoreClock, SERCOM4_CORE),
(sercom5_core, Sercom5CoreClock, SERCOM5_CORE),
(sercom6_core, Sercom6CoreClock, SERCOM6_CORE),
(sercom7_core, Sercom7CoreClock, SERCOM7_CORE),
(usb, UsbClock, USB),
(adc0, Adc0Clock, ADC0),
(adc1, Adc1Clock, ADC1),
(eic, EicClock, EIC),
(freq_m_msr, FreqmMsrClock, FREQM_MSR),
(freq_m_ref, FreqmRefClock, FREQM_REF),
(evsys0, Evsys0Clock, EVSYS0),
(evsys1, Evsys1Clock, EVSYS1),
(evsys2, Evsys2Clock, EVSYS2),
(evsys3, Evsys3Clock, EVSYS3),
(evsys4, Evsys4Clock, EVSYS4),
(evsys5, Evsys5Clock, EVSYS5),
(evsys6, Evsys6Clock, EVSYS6),
(evsys7, Evsys7Clock, EVSYS7),
(evsys8, Evsys8Clock, EVSYS8),
(evsys9, Evsys9Clock, EVSYS9),
(evsys10, Evsys10Clock, EVSYS10),
(evsys11, Evsys11Clock, EVSYS11),
(can0, Can0Clock, CAN0),
(can1, Can1Clock, CAN1),
(pdec, PdecClock, PDEC),
(ac, AcClock, AC),
(ccl, CclClock, CCL),
(dac, DacClock, DAC),
(i2s0, I2S0Clock, I2S0),
(i2s1, I2S1Clock, I2S1),
(sdhc0, Sdhc0Clock, SDHC0),
(sdhc1, Sdhc1Clock, SDHC1),
(cm4_trace, Cm4TraceClock, CM4_TRACE),
);
/// The frequency of the 48Mhz source.
pub const OSC48M_FREQ: Hertz = Hertz(48_000_000);
/// The frequency of the 32Khz source.
pub const OSC32K_FREQ: Hertz = Hertz(32_768);
/// The frequency of the 120Mhz source.
pub const OSC120M_FREQ: Hertz = Hertz(120_000_000);
fn set_flash_to_half_auto_wait_state(nvmctrl: &mut NVMCTRL) {
todo!();
// nvmctrl.ctrla.modify(|_, w| w.rws().half());
// TODO Fix above
}
fn enable_gclk_apb(mclk: &mut MCLK) {
mclk.apbamask.modify(|_, w| w.gclk_().set_bit());
}
/// Turn on the internal 32kHz oscillator
fn enable_internal_32kosc(osc32kctrl: &mut OSC32KCTRL) {
osc32kctrl.osculp32k.modify(|_, w| {
w.en32k().set_bit();
w.en1k().set_bit()
});
osc32kctrl.rtcctrl.write(|w| w.rtcsel().ulp1k());
}
/// Turn on the external 32kHz oscillator
fn enable_external_32kosc(osc32kctrl: &mut OSC32KCTRL) {
osc32kctrl.xosc32k.modify(|_, w| {
w.ondemand().clear_bit();
// Enable 32khz output
w.en32k().set_bit();
w.en1k().set_bit();
// Crystal connected to xin32/xout32
w.xtalen().set_bit();
w.enable().set_bit();
w.cgm().xt()
});
osc32kctrl.rtcctrl.write(|w| w.rtcsel().xosc1k());
// Wait for the oscillator to stabilize
while osc32kctrl.status.read().xosc32krdy().bit_is_clear() {}
}
fn wait_for_dpllrdy(oscctrl: &mut OSCCTRL) {
while oscctrl.dpllstatus0.read().lock().bit_is_clear()
|| oscctrl.dpllstatus0.read().clkrdy().bit_is_clear()
{}
}
/// Configure the dpll0 to run at 120MHz
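/// The 2MHz GCLK5 reference is multiplied by LDR + 1 = 59 + 1 = 60
/// (with ldrfrac = 0), giving the 120MHz output.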
fn configure_and_enable_dpll0(oscctrl: &mut OSCCTRL, gclk: &mut GCLK) {
gclk.pchctrl[u8::from(ClockId::FDPLL0) as usize].write(|w| {
w.chen().set_bit();
w.gen().gclk5()
});
unsafe {
oscctrl.dpllratio0.write(|w| {
w.ldr().bits(59);
w.ldrfrac().bits(0)
});
}
oscctrl.dpllctrlb0.write(|w| w.refclk().gclk());
oscctrl.dpllctrla0.write(|w| {
w.enable().set_bit();
w.ondemand().clear_bit()
});
}
#[cfg(feature = "usb")]
/// Configure the dfll48m to calibrate against the 1Khz USB SOF reference.
fn configure_usb_correction(oscctrl: &mut OSCCTRL) {
oscctrl.dfllmul.write(|w| unsafe {
w.cstep().bits(0x1)
.fstep().bits(0x1)
// scaling factor for 1Khz SOF signal.
.mul().bits((48_000_000u32 / 1000) as u16)
});
while oscctrl.dfllsync.read().dfllmul().bit_is_set() {}
oscctrl.dfllctrlb.write(|w| {
// closed loop mode
w.mode().set_bit()
// chill cycle disable
.ccdis().set_bit()
// usb correction
.usbcrm().set_bit()
});
while oscctrl.dfllsync.read().dfllctrlb().bit_is_set() {}
}
| 29.25641 | 100 | 0.59853 |
dd42971844981c7920e88af8512296b6cc3643d7 | 9,798 | //! The `pact_mock_server` crate provides a number of exported functions using C bindings for
//! controlling the mock server. These can be used in any language that supports C bindings.
//!
//! ## [create_mock_server](fn.create_mock_server_ffi.html)
//!
//! External interface to create a mock server. A pointer to the pact JSON as a C string is passed in,
//! as well as the port for the mock server to run on. A value of 0 for the port will result in a
//! port being allocated by the operating system. The port of the mock server is returned.
//!
//! ## [mock_server_matched](fn.mock_server_matched_ffi.html)
//!
//! Simple function that returns a boolean value given the port number of the mock service. This value will be true if all
//! the expectations of the pact that the mock server was created with have been met. It will return false if any request did
//! not match, an un-recognised request was received or an expected request was not received.
//!
//! ## [mock_server_mismatches](fn.mock_server_mismatches_ffi.html)
//!
//! This returns all the mismatches, un-expected requests and missing requests in JSON format, given the port number of the
//! mock server.
//!
//! **IMPORTANT NOTE:** The JSON string for the result is allocated on the rust heap, and will have to be freed once the
//! code using the mock server is complete. The [`cleanup_mock_server`](fn.cleanup_mock_server.html) function is provided for this purpose. If the mock
//! server is not cleaned up properly, this will result in memory leaks as the rust heap will not be reclaimed.
//!
//! ## [cleanup_mock_server](fn.cleanup_mock_server.html)
//!
//! This function will try to terminate the mock server with the given port number and clean up any memory allocated for it by
//! the [`mock_server_mismatches`](fn.mock_server_mismatches.html) function. Returns `true`, unless a mock server with the given port number does not exist,
//! or the function fails in some way.
//!
//! **NOTE:** Although `close()` on the listerner for the mock server is called, this does not currently work and the
//! listerner will continue handling requests. In this case, it will always return a 501 once the mock server has been
//! cleaned up.
//!
//! ## [write_pact_file](fn.write_pact_file.html)
//!
//! External interface to trigger a mock server to write out its pact file. This function should
//! be called if all the consumer tests have passed. The directory to write the file to is passed
//! as the second parameter. If a NULL pointer is passed, the current working directory is used.
//!
//! Returns 0 if the pact file was successfully written. Returns a positive code if the file can
//! not be written, or there is no mock server running on that port or the function panics.
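//!
//! ## Typical call sequence
//!
//! A minimal sketch of the intended call order; `pact_json` and `addr` stand
//! in for NUL-terminated C strings supplied by the caller:
//!
//! ```ignore
//! let port = create_mock_server_ffi(pact_json.as_ptr(), addr.as_ptr());
//! // ... run the consumer tests against the mock server on `port` ...
//! if mock_server_matched(port) {
//!     write_pact_file(port, std::ptr::null()); // null => current directory
//! }
//! cleanup_mock_server(port);
//! ```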
#![warn(missing_docs)]
extern crate pact_mock_server;
extern crate libc;
extern crate serde_json;
extern crate env_logger;
#[macro_use] extern crate log;
use std::panic::catch_unwind;
use libc::c_char;
use std::ffi::CStr;
use std::ffi::CString;
use std::str;
use serde_json::json;
use pact_mock_server::{MockServerError, WritePactFileErr, MANAGER};
use pact_mock_server::server_manager::ServerManager;
/// External interface to create a mock server. A pointer to the pact JSON as a C string is passed in,
/// as well as the port for the mock server to run on. A value of 0 for the port will result in a
/// port being allocated by the operating system. The port of the mock server is returned.
///
/// # Errors
///
/// Errors are returned as negative values.
///
/// | Error | Description |
/// |-------|-------------|
/// | -1 | A null pointer was received |
/// | -2 | The pact JSON could not be parsed |
/// | -3 | The mock server could not be started |
/// | -4 | The method panicked |
/// | -5 | The address is not valid |
///
#[no_mangle]
pub extern fn create_mock_server_ffi(pact_str: *const c_char, addr_str: *const c_char) -> i32 {
    // `try_init` rather than `init`, so repeated calls across the FFI
    // boundary don't panic on double initialisation of the logger.
    let _ = env_logger::try_init();
let result = catch_unwind(|| {
let c_str = unsafe {
if pact_str.is_null() {
error!("Got a null pointer instead of pact json");
return -1;
}
CStr::from_ptr(pact_str)
};
let addr_c_str = unsafe {
if addr_str.is_null() {
error!("Got a null pointer instead of listener address");
return -1;
}
CStr::from_ptr(addr_str)
};
if let Ok(Ok(addr)) = str::from_utf8(addr_c_str.to_bytes()).map(|s| s.parse::<std::net::SocketAddr>()) {
match pact_mock_server::create_mock_server(str::from_utf8(c_str.to_bytes()).unwrap(), addr) {
Ok(ms_port) => ms_port,
Err(err) => match err {
MockServerError::InvalidPactJson => -2,
MockServerError::MockServerFailedToStart => -3
}
}
}
else {
-5
}
});
match result {
Ok(val) => val,
Err(cause) => {
error!("Caught a general panic: {:?}", cause);
-4
}
}
}
/// External interface to check if a mock server has matched all its requests. The port number is
/// passed in, and if all requests have been matched, true is returned. False is returned if there
/// is no mock server on the given port, or if any request has not been successfully matched, or
/// the method panics.
#[no_mangle]
pub extern fn mock_server_matched(mock_server_port: i32) -> bool {
let result = catch_unwind(|| {
pact_mock_server::mock_server_matched(mock_server_port)
});
match result {
Ok(val) => val,
Err(cause) => {
error!("Caught a general panic: {:?}", cause);
false
}
}
}
/// External interface to get all the mismatches from a mock server. The port number of the mock
/// server is passed in, and a pointer to a C string with the mismatches in JSON format is
/// returned.
///
/// **NOTE:** The JSON string for the result is allocated on the heap, and will have to be freed
/// once the code using the mock server is complete. The [`cleanup_mock_server`](fn.cleanup_mock_server.html) function is
/// provided for this purpose.
///
/// # Errors
///
/// If there is no mock server with the provided port number, or the function panics, a NULL
/// pointer will be returned. Don't try to dereference it, it will not end well for you.
///
#[no_mangle]
pub extern fn mock_server_mismatches(mock_server_port: i32) -> *mut c_char {
let result = catch_unwind(|| {
let result = MANAGER.lock().unwrap()
.get_or_insert_with(ServerManager::new)
.find_mock_server_by_port_mut(mock_server_port as u16, &|ref mut mock_server| {
let mismatches = mock_server.mismatches().iter()
.map(|mismatch| mismatch.to_json() )
.collect::<Vec<serde_json::Value>>();
let json = json!(mismatches);
let s = CString::new(json.to_string()).unwrap();
let p = s.as_ptr();
mock_server.resources.push(s);
p
});
match result {
Some(p) => p as *mut _,
None => 0 as *mut _
}
});
match result {
Ok(val) => val,
Err(cause) => {
error!("Caught a general panic: {:?}", cause);
0 as *mut _
}
}
}
/// External interface to clean up a mock server. This function will try to terminate the mock server
/// with the given port number and clean up any memory allocated for it. Returns true, unless a
/// mock server with the given port number does not exist, or the function panics.
///
/// **NOTE:** Although `close()` on the listener for the mock server is called, this does not
/// currently work and the listener will continue handling requests. In this
/// case, it will always return a 404 once the mock server has been cleaned up.
#[no_mangle]
pub extern fn cleanup_mock_server(mock_server_port: i32) -> bool {
let result = catch_unwind(|| {
MANAGER.lock().unwrap()
.get_or_insert_with(ServerManager::new)
.shutdown_mock_server_by_port(mock_server_port as u16)
});
match result {
Ok(val) => val,
Err(cause) => {
error!("Caught a general panic: {:?}", cause);
false
}
}
}
/// External interface to trigger a mock server to write out its pact file. This function should
/// be called if all the consumer tests have passed. The directory to write the file to is passed
/// as the second parameter. If a NULL pointer is passed, the current working directory is used.
///
/// Returns 0 if the pact file was successfully written. Returns a positive code if the file can
/// not be written, or there is no mock server running on that port or the function panics.
///
/// # Errors
///
/// Errors are returned as positive values.
///
/// | Error | Description |
/// |-------|-------------|
/// | 1 | A general panic was caught |
/// | 2 | The pact file was not able to be written |
/// | 3 | A mock server with the provided port was not found |
#[no_mangle]
pub extern fn write_pact_file(mock_server_port: i32, directory: *const c_char) -> i32 {
let result = catch_unwind(|| {
let dir = unsafe {
if directory.is_null() {
warn!("Directory to write to is NULL, defaulting to the current working directory");
None
} else {
let c_str = CStr::from_ptr(directory);
let dir_str = str::from_utf8(c_str.to_bytes()).unwrap();
if dir_str.is_empty() {
None
} else {
Some(dir_str.to_string())
}
}
};
pact_mock_server::write_pact_file(mock_server_port, dir)
});
match result {
Ok(val) => match val {
Ok(_) => 0,
Err(err) => match err {
WritePactFileErr::IOError => 2,
WritePactFileErr::NoMockServer => 3
}
},
Err(cause) => {
error!("Caught a general panic: {:?}", cause);
1
}
}
}
| 37.976744 | 156 | 0.660747 |
1ab337453517941b84259981e428e8cfee5d5c87 | 791 | #[cfg(test)]
mod tests {
use fizzbuzz::fizzbuzz_to_thirty;
#[test]
fn test_fizzbuzz_to_thirty() {
assert_eq!(
"1\n\
2\n\
fizz\n\
4\n\
buzz\n\
fizz\n\
7\n\
8\n\
fizz\n\
buzz\n\
11\n\
fizz\n\
13\n\
14\n\
fizzbuzz\n\
16\n\
17\n\
fizz\n\
19\n\
buzz\n\
fizz\n\
22\n\
23\n\
fizz\n\
buzz\n\
26\n\
fizz\n\
28\n\
29\n\
fizzbuzz\n\
",
fizzbuzz_to_thirty());
}
}
| 18.833333 | 37 | 0.279393 |
e22918ce8d87ab0a54e285bd95da77ef39493f7b | 75,754 | // DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
//! This documentation was generated from *Cloud IAP* crate version *1.0.14+20200629*, where *20200629* is the exact revision of the *iap:v1beta1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.14*.
//!
//! Everything else about the *Cloud IAP* *v1_beta1* API can be found at the
//! [official documentation site](https://cloud.google.com/iap).
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/iap1_beta1).
//! # Features
//!
//! Use the following functionality with ease from the central [hub](struct.CloudIAP.html) ...
//!
//!
//! * [get iam policy](struct.MethodGetIamPolicyCall.html)
//! * [set iam policy](struct.MethodSetIamPolicyCall.html)
//! * [test iam permissions](struct.MethodTestIamPermissionCall.html)
//!
//!
//!
//! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.CloudIAP.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```Rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically ...
//!
//! ```ignore
//! let r = hub.methods().get_iam_policy(...).doit()
//! let r = hub.methods().set_iam_policy(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-iap1_beta1 = "*"
//! # This project intentionally uses an old version of Hyper. See
//! # https://github.com/Byron/google-apis-rs/issues/173 for more
//! # information.
//! hyper = "^0.10"
//! hyper-rustls = "^0.6"
//! serde = "^1.0"
//! serde_json = "^1.0"
//! yup-oauth2 = "^1.0"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate hyper_rustls;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_iap1_beta1 as iap1_beta1;
//! use iap1_beta1::GetIamPolicyRequest;
//! use iap1_beta1::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use iap1_beta1::CloudIAP;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
//! // As the method needs a request, you would usually fill it with the desired information
//! // into the respective structure. Some of the parts shown here might not be applicable !
//! // Values shown here are possibly random and not representative !
//! let mut req = GetIamPolicyRequest::default();
//!
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative !
//! let result = hub.methods().get_iam_policy(req, "resource")
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way an `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut ... .
#![allow(unused_imports, unused_mut, dead_code)]
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde;
extern crate serde_json;
extern crate yup_oauth2 as oauth2;
extern crate mime;
extern crate url;
mod cmn;
use std::collections::HashMap;
use std::cell::RefCell;
use std::borrow::BorrowMut;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use std::time::Duration;
pub use cmn::*;
// ##############
// UTILITIES ###
// ############
/// Identifies the an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
/// View and manage your data across Google Cloud Platform services
CloudPlatform,
}
impl AsRef<str> for Scope {
fn as_ref(&self) -> &str {
match *self {
Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
}
}
}
impl Default for Scope {
fn default() -> Scope {
Scope::CloudPlatform
}
}
// ########
// HUB ###
// ######
/// Central instance to access all CloudIAP related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_iap1_beta1 as iap1_beta1;
/// use iap1_beta1::GetIamPolicyRequest;
/// use iap1_beta1::{Result, Error};
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// use iap1_beta1::CloudIAP;
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// <MemoryStorage as Default>::default(), None);
/// let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill the respective structure
/// // with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = GetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.methods().get_iam_policy(req, "resource")
/// .doit();
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
pub struct CloudIAP<C, A> {
client: RefCell<C>,
auth: RefCell<A>,
_user_agent: String,
_base_url: String,
_root_url: String,
}
impl<'a, C, A> Hub for CloudIAP<C, A> {}
impl<'a, C, A> CloudIAP<C, A>
where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
pub fn new(client: C, authenticator: A) -> CloudIAP<C, A> {
CloudIAP {
client: RefCell::new(client),
auth: RefCell::new(authenticator),
_user_agent: "google-api-rust-client/1.0.14".to_string(),
_base_url: "https://iap.googleapis.com/".to_string(),
_root_url: "https://iap.googleapis.com/".to_string(),
}
}
pub fn methods(&'a self) -> MethodMethods<'a, C, A> {
MethodMethods { hub: &self }
}
/// Set the user-agent header field to use in all requests to the server.
/// It defaults to `google-api-rust-client/1.0.14`.
///
/// Returns the previously set user-agent.
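///
/// A sketch (the agent string is illustrative):
///
/// ````text
/// let previous = hub.user_agent("my-app/0.1".to_string());
/// ````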
pub fn user_agent(&mut self, agent_name: String) -> String {
mem::replace(&mut self._user_agent, agent_name)
}
/// Set the base url to use in all requests to the server.
/// It defaults to `https://iap.googleapis.com/`.
///
/// Returns the previously set base url.
pub fn base_url(&mut self, new_base_url: String) -> String {
mem::replace(&mut self._base_url, new_base_url)
}
/// Set the root url to use in all requests to the server.
/// It defaults to `https://iap.googleapis.com/`.
///
/// Returns the previously set root url.
pub fn root_url(&mut self, new_root_url: String) -> String {
mem::replace(&mut self._root_url, new_root_url)
}
}
// ############
// SCHEMAS ###
// ##########
/// Response message for `TestIamPermissions` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [test iam permissions](struct.MethodTestIamPermissionCall.html) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TestIamPermissionsResponse {
/// A subset of `TestIamPermissionsRequest.permissions` that the caller is
/// allowed.
pub permissions: Option<Vec<String>>,
}
impl ResponseResult for TestIamPermissionsResponse {}
/// Request message for `GetIamPolicy` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get iam policy](struct.MethodGetIamPolicyCall.html) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GetIamPolicyRequest {
/// OPTIONAL: A `GetPolicyOptions` object for specifying options to
/// `GetIamPolicy`.
pub options: Option<GetPolicyOptions>,
}
impl RequestValue for GetIamPolicyRequest {}
/// Request message for `TestIamPermissions` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [test iam permissions](struct.MethodTestIamPermissionCall.html) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TestIamPermissionsRequest {
/// The set of permissions to check for the `resource`. Permissions with
/// wildcards (such as '*' or 'storage.*') are not allowed. For more
/// information see
/// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
pub permissions: Option<Vec<String>>,
}
impl RequestValue for TestIamPermissionsRequest {}
/// Request message for `SetIamPolicy` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [set iam policy](struct.MethodSetIamPolicyCall.html) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SetIamPolicyRequest {
/// REQUIRED: The complete policy to be applied to the `resource`. The size of
/// the policy is limited to a few 10s of KB. An empty policy is a
/// valid policy but certain Cloud Platform services (such as Projects)
/// might reject them.
pub policy: Option<Policy>,
}
impl RequestValue for SetIamPolicyRequest {}
/// Encapsulates settings provided to GetIamPolicy.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GetPolicyOptions {
/// Optional. The policy format version to be returned.
///
/// Valid values are 0, 1, and 3. Requests specifying an invalid value will be
/// rejected.
///
/// Requests for policies with any conditional bindings must specify version 3.
/// Policies without any conditional bindings may specify any valid value or
/// leave the field unset.
///
/// To learn which resources support conditions in their IAM policies, see the
/// [IAM
/// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
#[serde(rename="requestedPolicyVersion")]
pub requested_policy_version: Option<i32>,
}
impl Part for GetPolicyOptions {}
/// Represents a textual expression in the Common Expression Language (CEL)
/// syntax. CEL is a C-like expression language. The syntax and semantics of CEL
/// are documented at https://github.com/google/cel-spec.
///
/// Example (Comparison):
///
/// ````text
/// title: "Summary size limit"
/// description: "Determines if a summary is less than 100 chars"
/// expression: "document.summary.size() < 100"
/// ````
///
/// Example (Equality):
///
/// ````text
/// title: "Requestor is owner"
/// description: "Determines if requestor is the document owner"
/// expression: "document.owner == request.auth.claims.email"
/// ````
///
/// Example (Logic):
///
/// ````text
/// title: "Public documents"
/// description: "Determine whether the document should be publicly visible"
/// expression: "document.type != 'private' && document.type != 'internal'"
/// ````
///
/// Example (Data Manipulation):
///
/// ````text
/// title: "Notification string"
/// description: "Create a notification string with a timestamp."
/// expression: "'New message received at ' + string(document.create_time)"
/// ````
///
/// The exact variables and functions that may be referenced within an expression
/// are determined by the service that evaluates it. See the service
/// documentation for additional information.
///
/// This type is not used in any activity, and only used as *part* of another schema.
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Expr {
/// Optional. Description of the expression. This is a longer text which
/// describes the expression, e.g. when hovered over it in a UI.
pub description: Option<String>,
/// Textual representation of an expression in Common Expression Language
/// syntax.
pub expression: Option<String>,
/// Optional. String indicating the location of the expression for error
/// reporting, e.g. a file name and a position in the file.
pub location: Option<String>,
/// Optional. Title for the expression, i.e. a short string describing
/// its purpose. This can be used e.g. in UIs which allow to enter the
/// expression.
pub title: Option<String>,
}
impl Part for Expr {}
/// Associates `members` with a `role`.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Binding {
/// Role that is assigned to `members`.
/// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
pub role: Option<String>,
/// The condition that is associated with this binding.
///
/// If the condition evaluates to `true`, then this binding applies to the
/// current request.
///
/// If the condition evaluates to `false`, then this binding does not apply to
/// the current request. However, a different role binding might grant the same
/// role to one or more of the members in this binding.
///
/// To learn which resources support conditions in their IAM policies, see the
/// [IAM
/// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
pub condition: Option<Expr>,
/// Specifies the identities requesting access for a Cloud Platform resource.
/// `members` can have the following values:
///
/// * `allUsers`: A special identifier that represents anyone who is
/// on the internet; with or without a Google account.
///
/// * `allAuthenticatedUsers`: A special identifier that represents anyone
/// who is authenticated with a Google account or a service account.
///
/// * `user:{emailid}`: An email address that represents a specific Google
/// account. For example, `[email protected]` .
///
///
/// * `serviceAccount:{emailid}`: An email address that represents a service
/// account. For example, `[email protected]`.
///
/// * `group:{emailid}`: An email address that represents a Google group.
/// For example, `[email protected]`.
///
/// * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
/// identifier) representing a user that has been recently deleted. For
/// example, `[email protected]?uid=123456789012345678901`. If the user is
/// recovered, this value reverts to `user:{emailid}` and the recovered user
/// retains the role in the binding.
///
/// * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
/// unique identifier) representing a service account that has been recently
/// deleted. For example,
/// `[email protected]?uid=123456789012345678901`.
/// If the service account is undeleted, this value reverts to
/// `serviceAccount:{emailid}` and the undeleted service account retains the
/// role in the binding.
///
/// * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
/// identifier) representing a Google group that has been recently
/// deleted. For example, `[email protected]?uid=123456789012345678901`. If
/// the group is recovered, this value reverts to `group:{emailid}` and the
/// recovered group retains the role in the binding.
///
///
/// * `domain:{domain}`: The G Suite domain (primary) that represents all the
/// users of that domain. For example, `google.com` or `example.com`.
///
///
pub members: Option<Vec<String>>,
}
impl Part for Binding {}
/// An Identity and Access Management (IAM) policy, which specifies access
/// controls for Google Cloud resources.
///
/// A `Policy` is a collection of `bindings`. A `binding` binds one or more
/// `members` to a single `role`. Members can be user accounts, service accounts,
/// Google groups, and domains (such as G Suite). A `role` is a named list of
/// permissions; each `role` can be an IAM predefined role or a user-created
/// custom role.
///
/// For some types of Google Cloud resources, a `binding` can also specify a
/// `condition`, which is a logical expression that allows access to a resource
/// only if the expression evaluates to `true`. A condition can add constraints
/// based on attributes of the request, the resource, or both. To learn which
/// resources support conditions in their IAM policies, see the
/// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
///
/// **JSON example:**
///
/// ````text
/// {
/// "bindings": [
/// {
/// "role": "roles/resourcemanager.organizationAdmin",
/// "members": [
/// "user:[email protected]",
/// "group:[email protected]",
/// "domain:google.com",
/// "serviceAccount:[email protected]"
/// ]
/// },
/// {
/// "role": "roles/resourcemanager.organizationViewer",
/// "members": [
/// "user:[email protected]"
/// ],
/// "condition": {
/// "title": "expirable access",
/// "description": "Does not grant access after Sep 2020",
/// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
/// }
/// }
/// ],
/// "etag": "BwWWja0YfJA=",
/// "version": 3
/// }
/// ````
///
/// **YAML example:**
///
/// ````text
/// bindings:
/// - members:
/// - user:[email protected]
/// - group:[email protected]
/// - domain:google.com
/// - serviceAccount:[email protected]
/// role: roles/resourcemanager.organizationAdmin
/// - members:
/// - user:[email protected]
/// role: roles/resourcemanager.organizationViewer
/// condition:
/// title: expirable access
/// description: Does not grant access after Sep 2020
/// expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
/// - etag: BwWWja0YfJA=
/// - version: 3
/// ````
///
/// For a description of IAM and its features, see the
/// [IAM documentation](https://cloud.google.com/iam/docs/).
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get iam policy](struct.MethodGetIamPolicyCall.html) (response)
/// * [set iam policy](struct.MethodSetIamPolicyCall.html) (response)
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Policy {
/// Associates a list of `members` to a `role`. Optionally, may specify a
/// `condition` that determines how and when the `bindings` are applied. Each
/// of the `bindings` must contain at least one member.
pub bindings: Option<Vec<Binding>>,
/// `etag` is used for optimistic concurrency control as a way to help
/// prevent simultaneous updates of a policy from overwriting each other.
/// It is strongly suggested that systems make use of the `etag` in the
/// read-modify-write cycle to perform policy updates in order to avoid race
/// conditions: An `etag` is returned in the response to `getIamPolicy`, and
/// systems are expected to put that etag in the request to `setIamPolicy` to
/// ensure that their change will be applied to the same version of the policy.
///
/// **Important:** If you use IAM Conditions, you must include the `etag` field
/// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
/// you to overwrite a version `3` policy with a version `1` policy, and all of
/// the conditions in the version `3` policy are lost.
pub etag: Option<String>,
/// Specifies the format of the policy.
///
/// Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
/// are rejected.
///
/// Any operation that affects conditional role bindings must specify version
/// `3`. This requirement applies to the following operations:
///
/// * Getting a policy that includes a conditional role binding
/// * Adding a conditional role binding to a policy
/// * Changing a conditional role binding in a policy
/// * Removing any role binding, with or without a condition, from a policy
/// that includes conditions
///
/// **Important:** If you use IAM Conditions, you must include the `etag` field
/// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
/// you to overwrite a version `3` policy with a version `1` policy, and all of
/// the conditions in the version `3` policy are lost.
///
/// If a policy does not include any conditions, operations on that policy may
/// specify any valid version or leave the field unset.
///
/// To learn which resources support conditions in their IAM policies, see the
/// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
pub version: Option<i32>,
}
impl ResponseResult for Policy {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all free methods, which are not associated with a particular resource.
/// It is not used directly, but through the `CloudIAP` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_iap1_beta1 as iap1_beta1;
///
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// use iap1_beta1::CloudIAP;
///
/// let secret: ApplicationSecret = Default::default();
/// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// <MemoryStorage as Default>::default(), None);
/// let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `get_iam_policy(...)`, `set_iam_policy(...)` and `test_iam_permissions(...)`
/// // to build up your call.
/// let rb = hub.methods();
/// # }
/// ```
pub struct MethodMethods<'a, C, A>
where C: 'a, A: 'a {
hub: &'a CloudIAP<C, A>,
}
impl<'a, C, A> MethodsBuilder for MethodMethods<'a, C, A> {}
impl<'a, C, A> MethodMethods<'a, C, A> {
/// Create a builder to help you perform the following task:
///
/// Gets the access control policy for an Identity-Aware Proxy protected
/// resource.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `resource` - REQUIRED: The resource for which the policy is being requested.
/// See the operation documentation for the appropriate value for this field.
pub fn get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> MethodGetIamPolicyCall<'a, C, A> {
MethodGetIamPolicyCall {
hub: self.hub,
_request: request,
_resource: resource.to_string(),
_delegate: Default::default(),
_scopes: Default::default(),
_additional_params: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Returns permissions that a caller has on the Identity-Aware Proxy protected
/// resource. If the resource does not exist or the caller does not have
/// Identity-Aware Proxy permissions, a [google.rpc.Code.PERMISSION_DENIED]
/// will be returned.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `resource` - REQUIRED: The resource for which the policy detail is being requested.
/// See the operation documentation for the appropriate value for this field.
pub fn test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> MethodTestIamPermissionCall<'a, C, A> {
MethodTestIamPermissionCall {
hub: self.hub,
_request: request,
_resource: resource.to_string(),
_delegate: Default::default(),
_scopes: Default::default(),
_additional_params: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Sets the access control policy for an Identity-Aware Proxy protected
/// resource. Replaces any existing policy.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `resource` - REQUIRED: The resource for which the policy is being specified.
/// See the operation documentation for the appropriate value for this field.
pub fn set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> MethodSetIamPolicyCall<'a, C, A> {
MethodSetIamPolicyCall {
hub: self.hub,
_request: request,
_resource: resource.to_string(),
_delegate: Default::default(),
_scopes: Default::default(),
_additional_params: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Gets the access control policy for an Identity-Aware Proxy protected
/// resource.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// A builder for the *getIamPolicy* method.
/// It is not used directly, but through a `MethodMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_iap1_beta1 as iap1_beta1;
/// use iap1_beta1::GetIamPolicyRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// # use iap1_beta1::CloudIAP;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// # <MemoryStorage as Default>::default(), None);
/// # let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill the respective structure
/// // with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = GetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.methods().get_iam_policy(req, "resource")
/// .doit();
/// # }
/// ```
pub struct MethodGetIamPolicyCall<'a, C, A>
where C: 'a, A: 'a {
hub: &'a CloudIAP<C, A>,
_request: GetIamPolicyRequest,
_resource: String,
_delegate: Option<&'a mut dyn Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a, C, A> CallBuilder for MethodGetIamPolicyCall<'a, C, A> {}
impl<'a, C, A> MethodGetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut dyn Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(MethodInfo { id: "iap.getIamPolicy",
http_method: hyper::method::Method::Post });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("resource", self._resource.to_string()));
for &field in ["alt", "resource"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+resource}:getIamPolicy";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["resource"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = hyper::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default());
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) {
Ok(token) => token,
Err(err) => {
match dlg.token(&*err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(Error::MissingToken(err))
}
}
}
};
let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Post, url.clone())
.header(UserAgent(self.hub._user_agent.clone()))
.header(auth_header.clone())
.header(ContentType(json_mime_type.clone()))
.header(ContentLength(request_size as u64))
.body(&mut request_value_reader);
dlg.pre_request();
req.send()
};
match req_result {
Err(err) => {
if let oauth2::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(Error::HttpError(err))
}
Ok(mut res) => {
if !res.status.is_success() {
let mut json_err = String::new();
res.read_to_string(&mut json_err).unwrap();
let json_server_error = json::from_str::<JsonServerError>(&json_err).ok();
let server_error = json::from_str::<ServerError>(&json_err)
.or_else(|_| json::from_str::<ErrorResponse>(&json_err).map(|r| r.error))
.ok();
if let oauth2::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<ErrorResponse>(&json_err){
Err(_) => Err(Error::Failure(res)),
Ok(serr) => Err(Error::BadRequest(serr))
}
}
let result_value = {
let mut json_response = String::new();
res.read_to_string(&mut json_response).unwrap();
match json::from_str(&json_response) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&json_response, &err);
return Err(Error::JsonDecodeError(json_response, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: GetIamPolicyRequest) -> MethodGetIamPolicyCall<'a, C, A> {
self._request = new_value;
self
}
/// REQUIRED: The resource for which the policy is being requested.
/// See the operation documentation for the appropriate value for this field.
///
/// Sets the *resource* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource(mut self, new_value: &str) -> MethodGetIamPolicyCall<'a, C, A> {
self._resource = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn Delegate) -> MethodGetIamPolicyCall<'a, C, A> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *access_token* (query-string) - OAuth access token.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *callback* (query-string) - JSONP
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *alt* (query-string) - Data format for response.
/// * *$.xgafv* (query-string) - V1 error format.
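///
/// A sketch of setting one of these (the selector value is illustrative):
///
/// ````text
/// .param("fields", "bindings")
/// ````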
pub fn param<T>(mut self, name: T, value: T) -> MethodGetIamPolicyCall<'a, C, A>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will work as well.
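///
/// A minimal sketch, shown in isolation from the surrounding builder chain:
///
/// ````text
/// .add_scope(Scope::CloudPlatform)
/// ````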
pub fn add_scope<T, S>(mut self, scope: T) -> MethodGetIamPolicyCall<'a, C, A>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Returns permissions that a caller has on the Identity-Aware Proxy protected
/// resource. If the resource does not exist or the caller does not have
/// Identity-Aware Proxy permissions, a [google.rpc.Code.PERMISSION_DENIED]
/// will be returned.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// A builder for the *testIamPermissions* method.
/// It is not used directly, but through a `MethodMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_iap1_beta1 as iap1_beta1;
/// use iap1_beta1::TestIamPermissionsRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// # use iap1_beta1::CloudIAP;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// # <MemoryStorage as Default>::default(), None);
/// # let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill the respective structure
/// // with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = TestIamPermissionsRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.methods().test_iam_permissions(req, "resource")
/// .doit();
/// # }
/// ```
pub struct MethodTestIamPermissionCall<'a, C, A>
where C: 'a, A: 'a {
hub: &'a CloudIAP<C, A>,
_request: TestIamPermissionsRequest,
_resource: String,
_delegate: Option<&'a mut dyn Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a, C, A> CallBuilder for MethodTestIamPermissionCall<'a, C, A> {}
impl<'a, C, A> MethodTestIamPermissionCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, TestIamPermissionsResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut dyn Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(MethodInfo { id: "iap.testIamPermissions",
http_method: hyper::method::Method::Post });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("resource", self._resource.to_string()));
for &field in ["alt", "resource"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+resource}:testIamPermissions";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["resource"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = hyper::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default());
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) {
Ok(token) => token,
Err(err) => {
match dlg.token(&*err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(Error::MissingToken(err))
}
}
}
};
let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Post, url.clone())
.header(UserAgent(self.hub._user_agent.clone()))
.header(auth_header.clone())
.header(ContentType(json_mime_type.clone()))
.header(ContentLength(request_size as u64))
.body(&mut request_value_reader);
dlg.pre_request();
req.send()
};
match req_result {
Err(err) => {
if let oauth2::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(Error::HttpError(err))
}
Ok(mut res) => {
if !res.status.is_success() {
let mut json_err = String::new();
res.read_to_string(&mut json_err).unwrap();
let json_server_error = json::from_str::<JsonServerError>(&json_err).ok();
let server_error = json::from_str::<ServerError>(&json_err)
.or_else(|_| json::from_str::<ErrorResponse>(&json_err).map(|r| r.error))
.ok();
if let oauth2::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<ErrorResponse>(&json_err){
Err(_) => Err(Error::Failure(res)),
Ok(serr) => Err(Error::BadRequest(serr))
}
}
let result_value = {
let mut json_response = String::new();
res.read_to_string(&mut json_response).unwrap();
match json::from_str(&json_response) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&json_response, &err);
return Err(Error::JsonDecodeError(json_response, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: TestIamPermissionsRequest) -> MethodTestIamPermissionCall<'a, C, A> {
self._request = new_value;
self
}
/// REQUIRED: The resource for which the policy detail is being requested.
/// See the operation documentation for the appropriate value for this field.
///
/// Sets the *resource* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource(mut self, new_value: &str) -> MethodTestIamPermissionCall<'a, C, A> {
self._resource = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn Delegate) -> MethodTestIamPermissionCall<'a, C, A> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *access_token* (query-string) - OAuth access token.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *callback* (query-string) - JSONP
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *alt* (query-string) - Data format for response.
/// * *$.xgafv* (query-string) - V1 error format.
pub fn param<T>(mut self, name: T, value: T) -> MethodTestIamPermissionCall<'a, C, A>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will work as well.
pub fn add_scope<T, S>(mut self, scope: T) -> MethodTestIamPermissionCall<'a, C, A>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Sets the access control policy for an Identity-Aware Proxy protected
/// resource. Replaces any existing policy.
/// More information about managing access via IAP can be found at:
/// https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
///
/// A builder for the *setIamPolicy* method.
/// It is not used directly, but through a `MethodMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_iap1_beta1 as iap1_beta1;
/// use iap1_beta1::SetIamPolicyRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
/// # use iap1_beta1::CloudIAP;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
/// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())),
/// # <MemoryStorage as Default>::default(), None);
/// # let mut hub = CloudIAP::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill the respective structure
/// // with the desired information. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = SetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.methods().set_iam_policy(req, "resource")
/// .doit();
/// # }
/// ```
pub struct MethodSetIamPolicyCall<'a, C, A>
where C: 'a, A: 'a {
hub: &'a CloudIAP<C, A>,
_request: SetIamPolicyRequest,
_resource: String,
_delegate: Option<&'a mut dyn Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a, C, A> CallBuilder for MethodSetIamPolicyCall<'a, C, A> {}
impl<'a, C, A> MethodSetIamPolicyCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Policy)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut dyn Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(MethodInfo { id: "iap.setIamPolicy",
http_method: hyper::method::Method::Post });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("resource", self._resource.to_string()));
for &field in ["alt", "resource"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+resource}:setIamPolicy";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["resource"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = hyper::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default());
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) {
Ok(token) => token,
Err(err) => {
match dlg.token(&*err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(Error::MissingToken(err))
}
}
}
};
let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Post, url.clone())
.header(UserAgent(self.hub._user_agent.clone()))
.header(auth_header.clone())
.header(ContentType(json_mime_type.clone()))
.header(ContentLength(request_size as u64))
.body(&mut request_value_reader);
dlg.pre_request();
req.send()
};
match req_result {
Err(err) => {
if let oauth2::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(Error::HttpError(err))
}
Ok(mut res) => {
if !res.status.is_success() {
let mut json_err = String::new();
res.read_to_string(&mut json_err).unwrap();
let json_server_error = json::from_str::<JsonServerError>(&json_err).ok();
let server_error = json::from_str::<ServerError>(&json_err)
.or_else(|_| json::from_str::<ErrorResponse>(&json_err).map(|r| r.error))
.ok();
if let oauth2::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<ErrorResponse>(&json_err){
Err(_) => Err(Error::Failure(res)),
Ok(serr) => Err(Error::BadRequest(serr))
}
}
let result_value = {
let mut json_response = String::new();
res.read_to_string(&mut json_response).unwrap();
match json::from_str(&json_response) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&json_response, &err);
return Err(Error::JsonDecodeError(json_response, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: SetIamPolicyRequest) -> MethodSetIamPolicyCall<'a, C, A> {
self._request = new_value;
self
}
/// REQUIRED: The resource for which the policy is being specified.
/// See the operation documentation for the appropriate value for this field.
///
/// Sets the *resource* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn resource(mut self, new_value: &str) -> MethodSetIamPolicyCall<'a, C, A> {
self._resource = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn Delegate) -> MethodSetIamPolicyCall<'a, C, A> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *access_token* (query-string) - OAuth access token.
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *callback* (query-string) - JSONP
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *alt* (query-string) - Data format for response.
/// * *$.xgafv* (query-string) - V1 error format.
pub fn param<T>(mut self, name: T, value: T) -> MethodSetIamPolicyCall<'a, C, A>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will work as well.
pub fn add_scope<T, S>(mut self, scope: T) -> MethodSetIamPolicyCall<'a, C, A>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
#![feature(raw_dylib)]
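// These declarations exercise the `raw-dylib` link kind: the functions below are
// bound directly against the DLL named "extern" at link time, without an import
// library, using the stdcall and fastcall calling conventions.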
#[repr(C)]
#[derive(Clone)]
struct S {
x: u8,
y: i32,
}
#[repr(C)]
#[derive(Clone)]
struct S2 {
x: i32,
y: u8,
}
#[repr(C)]
#[derive(Clone)]
struct S3 {
x: [u8; 5],
}
#[link(name = "extern", kind = "raw-dylib")]
extern "stdcall" {
fn stdcall_fn_1(i: i32);
fn stdcall_fn_2(c: u8, f: f32);
fn stdcall_fn_3(d: f64);
fn stdcall_fn_4(i: u8, j: u8, f: f32);
fn stdcall_fn_5(a: S, b: i32);
fn stdcall_fn_6(a: Option<&S>);
fn stdcall_fn_7(a: S2, b: i32);
fn stdcall_fn_8(a: S3, b: S3);
fn stdcall_fn_9(x: u8, y: f64);
}
#[link(name = "extern", kind = "raw-dylib")]
extern "fastcall" {
fn fastcall_fn_1(i: i32);
fn fastcall_fn_2(c: u8, f: f32);
fn fastcall_fn_3(d: f64);
fn fastcall_fn_4(i: u8, j: u8, f: f32);
fn fastcall_fn_5(a: S, b: i32);
fn fastcall_fn_6(a: Option<&S>);
fn fastcall_fn_7(a: S2, b: i32);
fn fastcall_fn_8(a: S3, b: S3);
fn fastcall_fn_9(x: u8, y: f64);
}
pub fn library_function() {
unsafe {
stdcall_fn_1(14);
stdcall_fn_2(16, 3.5);
stdcall_fn_3(3.5);
stdcall_fn_4(1, 2, 3.0);
stdcall_fn_5(S { x: 1, y: 2 }, 16);
stdcall_fn_6(Some(&S { x: 10, y: 12 }));
stdcall_fn_7(S2 { x: 15, y: 16 }, 3);
stdcall_fn_8(S3 { x: [1, 2, 3, 4, 5] }, S3 { x: [6, 7, 8, 9, 10] });
stdcall_fn_9(1, 3.0);
fastcall_fn_1(14);
fastcall_fn_2(16, 3.5);
fastcall_fn_3(3.5);
fastcall_fn_4(1, 2, 3.0);
// FIXME: 91167
// rustc generates incorrect code for the calls to fastcall_fn_5 and fastcall_fn_7
// on i686-pc-windows-gnu; commenting these out until the indicated issue is fixed.
//fastcall_fn_5(S { x: 1, y: 2 }, 16);
fastcall_fn_6(Some(&S { x: 10, y: 12 }));
//fastcall_fn_7(S2 { x: 15, y: 16 }, 3);
fastcall_fn_8(S3 { x: [1, 2, 3, 4, 5] }, S3 { x: [6, 7, 8, 9, 10] });
fastcall_fn_9(1, 3.0);
}
}
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
_reserved0: [u8; 0x0504],
#[doc = "0x504 - Write GPIO port"]
pub out: crate::Reg<out::OUT_SPEC>,
#[doc = "0x508 - Set individual bits in GPIO port"]
pub outset: crate::Reg<outset::OUTSET_SPEC>,
#[doc = "0x50c - Clear individual bits in GPIO port"]
pub outclr: crate::Reg<outclr::OUTCLR_SPEC>,
#[doc = "0x510 - Read GPIO port"]
pub in_: crate::Reg<in_::IN_SPEC>,
#[doc = "0x514 - Direction of GPIO pins"]
pub dir: crate::Reg<dir::DIR_SPEC>,
#[doc = "0x518 - DIR set register"]
pub dirset: crate::Reg<dirset::DIRSET_SPEC>,
#[doc = "0x51c - DIR clear register"]
pub dirclr: crate::Reg<dirclr::DIRCLR_SPEC>,
#[doc = "0x520 - Latch register indicating what GPIO pins that have met the criteria set in the PIN_CNF\\[n\\].SENSE registers"]
pub latch: crate::Reg<latch::LATCH_SPEC>,
#[doc = "0x524 - Select between default DETECT signal behaviour and LDETECT mode"]
pub detectmode: crate::Reg<detectmode::DETECTMODE_SPEC>,
_reserved9: [u8; 0x01d8],
#[doc = "0x700..0x780 - Description collection\\[n\\]: Configuration of GPIO pins"]
pub pin_cnf: [crate::Reg<pin_cnf::PIN_CNF_SPEC>; 32],
}
#[doc = "OUT register accessor: an alias for `Reg<OUT_SPEC>`"]
pub type OUT = crate::Reg<out::OUT_SPEC>;
#[doc = "Write GPIO port"]
pub mod out;
#[doc = "OUTSET register accessor: an alias for `Reg<OUTSET_SPEC>`"]
pub type OUTSET = crate::Reg<outset::OUTSET_SPEC>;
#[doc = "Set individual bits in GPIO port"]
pub mod outset;
#[doc = "OUTCLR register accessor: an alias for `Reg<OUTCLR_SPEC>`"]
pub type OUTCLR = crate::Reg<outclr::OUTCLR_SPEC>;
#[doc = "Clear individual bits in GPIO port"]
pub mod outclr;
#[doc = "IN register accessor: an alias for `Reg<IN_SPEC>`"]
pub type IN = crate::Reg<in_::IN_SPEC>;
#[doc = "Read GPIO port"]
pub mod in_;
#[doc = "DIR register accessor: an alias for `Reg<DIR_SPEC>`"]
pub type DIR = crate::Reg<dir::DIR_SPEC>;
#[doc = "Direction of GPIO pins"]
pub mod dir;
#[doc = "DIRSET register accessor: an alias for `Reg<DIRSET_SPEC>`"]
pub type DIRSET = crate::Reg<dirset::DIRSET_SPEC>;
#[doc = "DIR set register"]
pub mod dirset;
#[doc = "DIRCLR register accessor: an alias for `Reg<DIRCLR_SPEC>`"]
pub type DIRCLR = crate::Reg<dirclr::DIRCLR_SPEC>;
#[doc = "DIR clear register"]
pub mod dirclr;
#[doc = "LATCH register accessor: an alias for `Reg<LATCH_SPEC>`"]
pub type LATCH = crate::Reg<latch::LATCH_SPEC>;
#[doc = "Latch register indicating what GPIO pins that have met the criteria set in the PIN_CNF\\[n\\].SENSE registers"]
pub mod latch;
#[doc = "DETECTMODE register accessor: an alias for `Reg<DETECTMODE_SPEC>`"]
pub type DETECTMODE = crate::Reg<detectmode::DETECTMODE_SPEC>;
#[doc = "Select between default DETECT signal behaviour and LDETECT mode"]
pub mod detectmode;
#[doc = "PIN_CNF register accessor: an alias for `Reg<PIN_CNF_SPEC>`"]
pub type PIN_CNF = crate::Reg<pin_cnf::PIN_CNF_SPEC>;
#[doc = "Description collection\\[n\\]: Configuration of GPIO pins"]
pub mod pin_cnf;
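// Added usage sketch (illustrative; `P0` is an assumed peripheral name from the
// owning PAC crate — only the register API defined above is taken from this file):
//
//     let p = pac::Peripherals::take().unwrap();
//     p.P0.dirset.write(|w| unsafe { w.bits(1 << 13) }); // configure pin 13 as output
//     p.P0.outset.write(|w| unsafe { w.bits(1 << 13) }); // drive pin 13 high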
fc6cafe55908df71c6acecdffb6d49d957b8e2da | 590 |
use nasm_rs;
fn main() {
// We must _not_ do this build under the test setup, because that
// will produce a `_start` symbol that will conflict with the one
// provided by the unittest harness.
if cfg!(feature = "test") {
return;
}
let mut build = nasm_rs::Build::new();
build
.file("src/vm.S")
.file("src/boot.S")
.file("src/multiboot2_header.S")
.file("src/ap_startup.S")
.include("asm_include/")
.target("x86_64-unknown-none")
.compile("vm");
println!("cargo:rustc-link-lib=static=vm");
}
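// Added note: the `test` feature gate above implies a Cargo.toml entry along the
// lines of (illustrative):
//
//     [features]
//     test = []
//
// so building with `--features test` skips assembling the kernel objects and lets
// the unittest harness provide its own `_start` symbol.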
225b4a63955f3459868181f8da9dfac4341905a9 | 1,238 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use diem_crypto::HashValue;
/// Swap template-type values if 'cond'=true - useful to determine left/right parameters.
pub(crate) fn swap_if<T>(first: T, second: T, cond: bool) -> (T, T) {
if cond {
(second, first)
} else {
(first, second)
}
}
/// Return the index of the first update whose key has bit 1 at the given depth,
/// assuming the updates are sorted lexicographically by key.
pub(crate) fn partition<T>(updates: &[(HashValue, T)], depth: usize) -> usize {
// Binary search for the cut-off point where the bit at this depth turns from 0 to 1.
// TODO: with stable partition_point: updates.partition_point(|&u| !u.0.bit(depth));
let (mut i, mut j) = (0, updates.len());
// Find the first index that starts with bit 1.
while i < j {
let mid = i + (j - i) / 2;
if updates[mid].0.bit(depth) {
j = mid;
} else {
i = mid + 1;
}
}
i
}
pub(crate) enum Either<A, B> {
A(A),
B(B),
}
impl<A, B> Either<A, B> {
pub fn or(cond: bool, a: A, b: B) -> Self {
if cond {
Either::A(a)
} else {
Either::B(b)
}
}
}
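// Added sketch (not in the original file): minimal checks for the helpers above.
// Assumes `HashValue::new([u8; HashValue::LENGTH])` exists and that `bit` uses
// MSB-first indexing, as `partition` itself relies on.
#[cfg(test)]
mod utils_sketch_tests {
    use super::*;

    #[test]
    fn swap_if_swaps_only_when_cond_holds() {
        assert_eq!(swap_if(1, 2, false), (1, 2));
        assert_eq!(swap_if(1, 2, true), (2, 1));
    }

    #[test]
    fn partition_finds_first_key_with_bit_set() {
        let zero = HashValue::new([0u8; HashValue::LENGTH]);
        let mut one_bytes = [0u8; HashValue::LENGTH];
        one_bytes[0] = 0x80; // bit 0 set under MSB-first indexing
        let one = HashValue::new(one_bytes);
        // Sorted order puts the key with bit 0 clear first, so the cut-off is 1.
        let updates = vec![(zero, ()), (one, ())];
        assert_eq!(partition(&updates, 0), 1);
    }
}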
efbf3ff16e0f6544a55d759570efe41bb0fcee42 | 2,496 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use diem_config::config::NodeConfig;
use diem_types::on_chain_config::VMPublishingOption;
use hex::FromHex;
use rand::{rngs::StdRng, SeedableRng};
use std::path::PathBuf;
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
#[structopt(about = "Diem Node")]
struct Args {
#[structopt(
short = "f",
long,
required_unless = "test",
help = "Path to NodeConfig"
)]
config: Option<PathBuf>,
#[structopt(long, help = "Enable a single validator testnet")]
test: bool,
#[structopt(
long,
help = "RNG Seed to use when starting single validator testnet",
parse(try_from_str = FromHex::from_hex),
requires("test")
)]
seed: Option<[u8; 32]>,
#[structopt(
long,
help = "Enable open publishing when starting single validator testnet",
requires("test")
)]
open_publishing: bool,
#[structopt(long, help = "Enabling random ports for testnet", requires("test"))]
random_ports: bool,
#[structopt(
long,
help = "Paths to module blobs to be included in genesis. Can include both files and directories",
requires("test")
)]
genesis_modules: Option<Vec<PathBuf>>,
}
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
fn main() {
let args = Args::from_args();
if args.test {
println!("Entering test mode, this should never be used in production!");
let rng = args
.seed
.map(StdRng::from_seed)
.unwrap_or_else(StdRng::from_entropy);
let publishing_option = if args.open_publishing {
Some(VMPublishingOption::open())
} else {
None
};
let genesis_modules = if let Some(module_paths) = args.genesis_modules {
diem_framework_releases::load_modules_from_paths(&module_paths)
} else {
diem_framework_releases::current_module_blobs().to_vec()
};
diem_node::load_test_environment(
args.config,
args.random_ports,
publishing_option,
genesis_modules,
rng,
);
} else {
let config = NodeConfig::load(args.config.unwrap()).expect("Failed to load node config");
println!("Using node config {:?}", &config);
diem_node::start(&config, None);
};
}
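// Added note (illustrative invocations; flag names as defined in `Args` above):
//
//     diem-node -f /path/to/node.yaml
//     diem-node --test --random-ports --open-publishing
//
// `--seed`, `--open-publishing`, `--random-ports`, and `--genesis-modules` are all
// gated on `--test` via `requires("test")`, so they only apply to the local testnet.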
eb4120eeadb4e096bba731375e79cdcfb4e843d8 | 563 |
// option1.rs
// Make me compile! Execute `rustlings hint option1` for hints
// you can modify anything EXCEPT for this function's sig
fn print_number(maybe_number: Option<u16>) {
    let x = match maybe_number {
        None => return,
        Some(i) => i,
    };
    println!("printing: {}", x);
}
fn main() {
print_number(Some(13));
print_number(Some(99));
    let mut numbers: [Option<u16>; 5] = [Some(0); 5];
for iter in 0..5 {
let number_to_add: u16 = {
((iter * 1235) + 2) / (4 * 16)
};
numbers[iter as usize] = Some(number_to_add);
}
}
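// Added side note (commented out so the fixed signature above stays the only
// definition): the same early-return shape can be written with `if let`, which
// reads more directly:
//
//     fn print_number(maybe_number: Option<u16>) {
//         if let Some(n) = maybe_number {
//             println!("printing: {}", n);
//         }
//     }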
87bd4435d4eb7311c29dffbf97bd4f362efd0fa9 | 4,536 |
use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::fmt;
use fnv::FnvHasher;
type AnyMap = HashMap<TypeId, Box<Any + Send + Sync>, BuildHasherDefault<FnvHasher>>;
/// A type map of protocol extensions.
///
/// `Extensions` can be used by `Request` and `Response` to store
/// extra data derived from the underlying protocol.
#[derive(Default)]
pub struct Extensions {
map: AnyMap,
}
impl Extensions {
/// Create an empty `Extensions`.
#[inline]
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
}
}
/// Insert a type into this `Extensions`.
///
/// If a extension of this type already existed, it will
/// be returned.
///
/// # Example
///
/// ```
/// # use http::Extensions;
/// let mut ext = Extensions::new();
/// assert!(ext.insert(5i32).is_none());
/// assert!(ext.insert(4u8).is_none());
/// assert_eq!(ext.insert(9i32), Some(5i32));
/// ```
pub fn insert<T: Send + Sync + 'static>(&mut self, val: T) -> Option<T> {
self.map.insert(TypeId::of::<T>(), Box::new(val))
.and_then(|boxed| {
//TODO: we can use unsafe and remove double checking the type id
(boxed as Box<Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
}
/// Get a reference to a type previously inserted on this `Extensions`.
///
/// # Example
///
/// ```
/// # use http::Extensions;
/// let mut ext = Extensions::new();
/// assert!(ext.get::<i32>().is_none());
/// ext.insert(5i32);
///
/// assert_eq!(ext.get::<i32>(), Some(&5i32));
/// ```
pub fn get<T: Send + Sync + 'static>(&self) -> Option<&T> {
self.map.get(&TypeId::of::<T>())
//TODO: we can use unsafe and remove double checking the type id
.and_then(|boxed| (&**boxed as &(Any + 'static)).downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
///
/// # Example
///
/// ```
/// # use http::Extensions;
/// let mut ext = Extensions::new();
/// ext.insert(String::from("Hello"));
/// ext.get_mut::<String>().unwrap().push_str(" World");
///
/// assert_eq!(ext.get::<String>().unwrap(), "Hello World");
/// ```
pub fn get_mut<T: Send + Sync + 'static>(&mut self) -> Option<&mut T> {
self.map.get_mut(&TypeId::of::<T>())
//TODO: we can use unsafe and remove double checking the type id
.and_then(|boxed| (&mut **boxed as &mut (Any + 'static)).downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
///
/// # Example
///
/// ```
/// # use http::Extensions;
/// let mut ext = Extensions::new();
/// ext.insert(5i32);
/// assert_eq!(ext.remove::<i32>(), Some(5i32));
/// assert!(ext.get::<i32>().is_none());
/// ```
pub fn remove<T: Send + Sync + 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>())
.and_then(|boxed| {
//TODO: we can use unsafe and remove double checking the type id
(boxed as Box<Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
}
/// Clear the `Extensions` of all inserted extensions.
///
/// # Example
///
/// ```
/// # use http::Extensions;
/// let mut ext = Extensions::new();
/// ext.insert(5i32);
/// ext.clear();
///
/// assert!(ext.get::<i32>().is_none());
/// ```
#[inline]
pub fn clear(&mut self) {
self.map.clear();
}
}
impl fmt::Debug for Extensions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Extensions")
.finish()
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
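// Added sketch: the map is keyed by TypeId, so two callers storing the same plain
// type would overwrite each other; wrapping values in local newtypes avoids that.
#[test]
fn test_newtype_keys_do_not_collide() {
    #[derive(Debug, PartialEq)]
    struct RequestId(u64);
    #[derive(Debug, PartialEq)]
    struct RetryCount(u64);

    let mut extensions = Extensions::new();
    extensions.insert(RequestId(7));
    extensions.insert(RetryCount(2));

    // Both u64 wrappers coexist because their TypeIds differ.
    assert_eq!(extensions.get::<RequestId>(), Some(&RequestId(7)));
    assert_eq!(extensions.get::<RetryCount>(), Some(&RetryCount(2)));
}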
f86781b9daef4d645e8060bf2a75bc294641ae40 | 30,185 |
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_ATTACH: u32 = 14u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_BREAK: u32 = 6u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_DIVOVERFLOW: u32 = 8u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_DLLSTART: u32 = 12u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_DLLSTOP: u32 = 13u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_GPFAULT: u32 = 7u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_GPFAULT2: u32 = 21u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_INIT: u32 = 20u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_INSTRFAULT: u32 = 9u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_MODFREE: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_MODLOAD: u32 = 3u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_MODMOVE: u32 = 19u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_SEGFREE: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_SEGLOAD: u32 = 0u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_SEGMOVE: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_SINGLESTEP: u32 = 5u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_STACKFAULT: u32 = 16u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_TASKSTART: u32 = 10u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_TASKSTOP: u32 = 11u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_TEMPBP: u32 = 18u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_TOOLHELP: u32 = 15u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const DBG_WOWINIT: u32 = 17u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type DEBUGEVENTPROC = ::core::option::Option<unsafe extern "system" fn(param0: *mut super::Diagnostics::Debug::DEBUG_EVENT, param1: *mut ::core::ffi::c_void) -> u32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_ACCELERATORS: u32 = 9u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_BITMAP: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_CURSOR: u32 = 12u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_CURSORCOMPONENT: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_DIALOG: u32 = 5u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_ERRTABLE: u32 = 11u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_FONT: u32 = 8u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_FONTDIR: u32 = 7u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_ICON: u32 = 14u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_ICONCOMPONENT: u32 = 3u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_MAX_RESOURCE: u32 = 15u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_MENU: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_NAMETABLE: u32 = 15u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_RCDATA: u32 = 10u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_STRING: u32 = 6u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GD_USERDEFINED: u32 = 0u32;
#[repr(C, packed(4))]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct GLOBALENTRY {
pub dwSize: u32,
pub dwAddress: u32,
pub dwBlockSize: u32,
pub hBlock: super::super::Foundation::HANDLE,
pub wcLock: u16,
pub wcPageLock: u16,
pub wFlags: u16,
pub wHeapPresent: super::super::Foundation::BOOL,
pub hOwner: super::super::Foundation::HANDLE,
pub wType: u16,
pub wData: u16,
pub dwNext: u32,
pub dwNextAlt: u32,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for GLOBALENTRY {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for GLOBALENTRY {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GLOBAL_ALL: u32 = 0u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GLOBAL_FREE: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GLOBAL_LRU: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_BURGERMASTER: u32 = 10u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_CODE: u32 = 3u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_DATA: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_DGROUP: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_FREE: u32 = 7u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_INTERNAL: u32 = 8u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_MODULE: u32 = 6u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_RESOURCE: u32 = 5u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_SENTINEL: u32 = 9u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_TASK: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const GT_UNKNOWN: u32 = 0u32;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct IMAGE_NOTE {
pub Module: [super::super::Foundation::CHAR; 10],
pub FileName: [super::super::Foundation::CHAR; 256],
pub hModule: u16,
pub hTask: u16,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for IMAGE_NOTE {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for IMAGE_NOTE {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const MAX_MODULE_NAME: u32 = 9u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const MAX_PATH16: u32 = 255u32;
#[repr(C, packed(4))]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct MODULEENTRY {
pub dwSize: u32,
pub szModule: [super::super::Foundation::CHAR; 10],
pub hModule: super::super::Foundation::HANDLE,
pub wcUsage: u16,
pub szExePath: [super::super::Foundation::CHAR; 256],
pub wNext: u16,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for MODULEENTRY {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for MODULEENTRY {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type PROCESSENUMPROC = ::core::option::Option<unsafe extern "system" fn(dwprocessid: u32, dwattributes: u32, lpuserdefined: super::super::Foundation::LPARAM) -> super::super::Foundation::BOOL>;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct SEGMENT_NOTE {
pub Selector1: u16,
pub Selector2: u16,
pub Segment: u16,
pub Module: [super::super::Foundation::CHAR; 10],
pub FileName: [super::super::Foundation::CHAR; 256],
pub Type: u16,
pub Length: u32,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for SEGMENT_NOTE {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for SEGMENT_NOTE {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const SN_CODE: u32 = 0u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const SN_DATA: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const SN_V86: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const STATUS_VDM_EVENT: i32 = 1073741829i32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type TASKENUMPROC = ::core::option::Option<unsafe extern "system" fn(dwthreadid: u32, hmod16: u16, htask16: u16, lpuserdefined: super::super::Foundation::LPARAM) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type TASKENUMPROCEX = ::core::option::Option<unsafe extern "system" fn(dwthreadid: u32, hmod16: u16, htask16: u16, pszmodname: *mut i8, pszfilename: *mut i8, lpuserdefined: super::super::Foundation::LPARAM) -> super::super::Foundation::BOOL>;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct TEMP_BP_NOTE {
pub Seg: u16,
pub Offset: u32,
pub bPM: super::super::Foundation::BOOL,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for TEMP_BP_NOTE {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for TEMP_BP_NOTE {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_ALIGNMENT: u32 = 262144u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_AUXCARRY: u32 = 16u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_CARRY: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_DIRECTION: u32 = 1024u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_INTERRUPT: u32 = 512u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_IOPL: u32 = 12288u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_IOPL_BITS: u32 = 18u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_OVERFLOW: u32 = 2048u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_PARITY: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_RESUME: u32 = 65536u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_SIGN: u32 = 128u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_TRACE: u32 = 256u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_V86: u32 = 131072u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const V86FLAGS_ZERO: u32 = 64u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMADDR_PM16: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMADDR_PM32: u32 = 16u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMADDR_V86: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMBREAKTHREADPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE) -> super::super::Foundation::BOOL>;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_System_Kernel'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(feature = "Win32_System_Kernel")]
pub struct VDMCONTEXT {
pub ContextFlags: u32,
pub Dr0: u32,
pub Dr1: u32,
pub Dr2: u32,
pub Dr3: u32,
pub Dr6: u32,
pub Dr7: u32,
pub FloatSave: super::Kernel::FLOATING_SAVE_AREA,
pub SegGs: u32,
pub SegFs: u32,
pub SegEs: u32,
pub SegDs: u32,
pub Edi: u32,
pub Esi: u32,
pub Ebx: u32,
pub Edx: u32,
pub Ecx: u32,
pub Eax: u32,
pub Ebp: u32,
pub Eip: u32,
pub SegCs: u32,
pub EFlags: u32,
pub Esp: u32,
pub SegSs: u32,
pub ExtendedRegisters: [u8; 512],
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(feature = "Win32_System_Kernel")]
impl ::core::marker::Copy for VDMCONTEXT {}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(feature = "Win32_System_Kernel")]
impl ::core::clone::Clone for VDMCONTEXT {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_System_Kernel'*"]
#[cfg(feature = "Win32_System_Kernel")]
pub struct VDMCONTEXT_WITHOUT_XSAVE {
pub ContextFlags: u32,
pub Dr0: u32,
pub Dr1: u32,
pub Dr2: u32,
pub Dr3: u32,
pub Dr6: u32,
pub Dr7: u32,
pub FloatSave: super::Kernel::FLOATING_SAVE_AREA,
pub SegGs: u32,
pub SegFs: u32,
pub SegEs: u32,
pub SegDs: u32,
pub Edi: u32,
pub Esi: u32,
pub Ebx: u32,
pub Edx: u32,
pub Ecx: u32,
pub Eax: u32,
pub Ebp: u32,
pub Eip: u32,
pub SegCs: u32,
pub EFlags: u32,
pub Esp: u32,
pub SegSs: u32,
}
#[cfg(feature = "Win32_System_Kernel")]
impl ::core::marker::Copy for VDMCONTEXT_WITHOUT_XSAVE {}
#[cfg(feature = "Win32_System_Kernel")]
impl ::core::clone::Clone for VDMCONTEXT_WITHOUT_XSAVE {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMCONTEXT_i386: u32 = 65536u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMCONTEXT_i486: u32 = 65536u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_DEBUGGER: u32 = 16u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_DIVIDEBYZERO: u32 = 256u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_DOSTASK: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_EXCEPTIONS: u32 = 8u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_LOADDLL: u32 = 4u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_BREAK_WOWTASK: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_INITIAL_FLAGS: u32 = 256u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_MAX_SYMBOL_BUFFER: u32 = 256u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMDBG_TRACE_HISTORY: u32 = 128u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMDETECTWOWPROC = ::core::option::Option<unsafe extern "system" fn() -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMENUMPROCESSWOWPROC = ::core::option::Option<unsafe extern "system" fn(param0: PROCESSENUMPROC, param1: super::super::Foundation::LPARAM) -> i32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMENUMTASKWOWEXPROC = ::core::option::Option<unsafe extern "system" fn(param0: u32, param1: TASKENUMPROCEX, param2: super::super::Foundation::LPARAM) -> i32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMENUMTASKWOWPROC = ::core::option::Option<unsafe extern "system" fn(param0: u32, param1: TASKENUMPROC, param2: super::super::Foundation::LPARAM) -> i32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_ALLFLAGS: u32 = 57344u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_NEEDS_INTERACTIVE: u32 = 32768u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_PE: u32 = 8192u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_PM16: u32 = 2u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_V86: u32 = 1u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDMEVENT_VERBOSE: u32 = 16384u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETADDREXPRESSIONPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::PSTR, param1: super::super::Foundation::PSTR, param2: *mut u16, param3: *mut u32, param4: *mut u16) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Kernel'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Kernel"))]
pub type VDMGETCONTEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut VDMCONTEXT) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Kernel'*"]
#[cfg(target_arch = "x86")]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Kernel"))]
pub type VDMGETCONTEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut super::Diagnostics::Debug::CONTEXT) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETDBGFLAGSPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE) -> u32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETMODULESELECTORPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: u32, param3: super::super::Foundation::PSTR, param4: *mut u16) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETPOINTERPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: u16, param3: u32, param4: super::super::Foundation::BOOL) -> u32>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETSEGMENTINFOPROC = ::core::option::Option<unsafe extern "system" fn(param0: u16, param1: u32, param2: super::super::Foundation::BOOL, param3: VDM_SEGINFO) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETSELECTORMODULEPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: u16, param3: *mut u32, param4: super::super::Foundation::PSTR, param5: u32, param6: super::super::Foundation::PSTR, param7: u32) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMGETSYMBOLPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::PSTR, param1: u16, param2: u32, param3: super::super::Foundation::BOOL, param4: super::super::Foundation::BOOL, param5: super::super::Foundation::PSTR, param6: *mut u32) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug"))]
pub type VDMGETTHREADSELECTORENTRYPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: u32, param3: *mut VDMLDT_ENTRY) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug'*"]
#[cfg(target_arch = "x86")]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug"))]
pub type VDMGETTHREADSELECTORENTRYPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: u32, param3: *mut super::Diagnostics::Debug::LDT_ENTRY) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type VDMGLOBALFIRSTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut GLOBALENTRY, param3: u16, param4: DEBUGEVENTPROC, param5: *mut ::core::ffi::c_void) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type VDMGLOBALNEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut GLOBALENTRY, param3: u16, param4: DEBUGEVENTPROC, param5: *mut ::core::ffi::c_void) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMISMODULELOADEDPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::PSTR) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMKILLWOWPROC = ::core::option::Option<unsafe extern "system" fn() -> super::super::Foundation::BOOL>;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
pub struct VDMLDT_ENTRY {
pub LimitLow: u16,
pub BaseLow: u16,
pub HighWord: VDMLDT_ENTRY_0,
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::marker::Copy for VDMLDT_ENTRY {}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::clone::Clone for VDMLDT_ENTRY {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
pub union VDMLDT_ENTRY_0 {
pub Bytes: VDMLDT_ENTRY_0_1,
pub Bits: VDMLDT_ENTRY_0_0,
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::marker::Copy for VDMLDT_ENTRY_0 {}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::clone::Clone for VDMLDT_ENTRY_0 {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
pub struct VDMLDT_ENTRY_0_0 {
pub _bitfield: u32,
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::marker::Copy for VDMLDT_ENTRY_0_0 {}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::clone::Clone for VDMLDT_ENTRY_0_0 {
fn clone(&self) -> Self {
*self
}
}
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
pub struct VDMLDT_ENTRY_0_1 {
pub BaseMid: u8,
pub Flags1: u8,
pub Flags2: u8,
pub BaseHi: u8,
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::marker::Copy for VDMLDT_ENTRY_0_1 {}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
impl ::core::clone::Clone for VDMLDT_ENTRY_0_1 {
fn clone(&self) -> Self {
*self
}
}
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type VDMMODULEFIRSTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut MODULEENTRY, param3: DEBUGEVENTPROC, param4: *mut ::core::ffi::c_void) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type VDMMODULENEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut MODULEENTRY, param3: DEBUGEVENTPROC, param4: *mut ::core::ffi::c_void) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Threading'*"]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Threading"))]
pub type VDMPROCESSEXCEPTIONPROC = ::core::option::Option<unsafe extern "system" fn(param0: *mut super::Diagnostics::Debug::DEBUG_EVENT) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Kernel'*"]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Kernel"))]
pub type VDMSETCONTEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut VDMCONTEXT) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation', 'Win32_System_Diagnostics_Debug', 'Win32_System_Kernel'*"]
#[cfg(target_arch = "x86")]
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Diagnostics_Debug", feature = "Win32_System_Kernel"))]
pub type VDMSETCONTEXTPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: super::super::Foundation::HANDLE, param2: *mut super::Diagnostics::Debug::CONTEXT) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMSETDBGFLAGSPROC = ::core::option::Option<unsafe extern "system" fn(param0: super::super::Foundation::HANDLE, param1: u32) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMSTARTTASKINWOWPROC = ::core::option::Option<unsafe extern "system" fn(param0: u32, param1: super::super::Foundation::PSTR, param2: u16) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub type VDMTERMINATETASKINWOWPROC = ::core::option::Option<unsafe extern "system" fn(param0: u32, param1: u16) -> super::super::Foundation::BOOL>;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDM_KGDT_R3_CODE: u32 = 24u32;
#[doc = "*Required features: 'Win32_System_VirtualDosMachines'*"]
pub const VDM_MAXIMUM_SUPPORTED_EXTENSION: u32 = 512u32;
#[repr(C)]
#[doc = "*Required features: 'Win32_System_VirtualDosMachines', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub struct VDM_SEGINFO {
pub Selector: u16,
pub SegNumber: u16,
pub Length: u32,
pub Type: u16,
pub ModuleName: [super::super::Foundation::CHAR; 9],
pub FileName: [super::super::Foundation::CHAR; 255],
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for VDM_SEGINFO {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for VDM_SEGINFO {
fn clone(&self) -> Self {
*self
}
}
4b9a50ebddf7452f6221faacfeac6019a7a2259d | 2,275 |
#![warn(
missing_debug_implementations,
nonstandard_style,
rust_2018_idioms,
unreachable_pub
)]
#![feature(
associated_type_defaults,
backtrace,
box_patterns,
box_syntax,
error_iter,
never_type,
pattern,
stdsimd,
trait_alias,
type_alias_impl_trait,
try_blocks,
try_trait
)]
#![recursion_limit = "512"]
#[macro_use]
mod macros;
cfg_common! {
mod helpers;
use proc_macro::TokenStream;
use proc_macro_error::proc_macro_error;
use syn::parse_macro_input;
}
cfg_runtime! {
mod runtime;
use runtime::entrypoint::{
MainEntryPointArgs,
MainEntryPoint,
TestEntryPoint,
TestEntryPointArgs
};
use syn::{
ItemFn,
};
#[proc_macro_error]
#[proc_macro_attribute]
pub fn runtime_entrypoint_main(args: TokenStream, input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as ItemFn);
runtime::entrypoint::set_fn_dummy(&item);
let attr_args = parse_macro_input!(args as MainEntryPointArgs);
TokenStream::from(MainEntryPoint::new(attr_args, item))
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn runtime_entrypoint_test(args: TokenStream, input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as ItemFn);
runtime::entrypoint::set_fn_dummy(&item);
let attr_args = parse_macro_input!(args as TestEntryPointArgs);
TokenStream::from(TestEntryPoint::new(attr_args, item))
}
}
cfg_view! {
mod view;
#[proc_macro_error]
#[proc_macro_derive(Element, attributes(element))]
pub fn view_element(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as view::Element);
TokenStream::from(item)
}
#[proc_macro_error]
#[proc_macro]
pub fn view_define_element(input: TokenStream) -> TokenStream {
let declaration = parse_macro_input!(input as view::ElementDefinition);
TokenStream::from(declaration)
}
#[proc_macro_error]
#[proc_macro]
pub fn view_define_elements(input: TokenStream) -> TokenStream {
let declaration = parse_macro_input!(input as view::ElementDefinitions);
TokenStream::from(declaration)
}
}
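// Added usage sketch (hypothetical downstream crate; attribute and derive names as
// exported above — proc-macro crates cannot invoke their own macros, so this stays
// illustrative):
//
//     #[runtime_entrypoint_main]
//     async fn main() { /* ... */ }
//
//     #[derive(Element)]
//     #[element(/* element attributes */)]
//     struct Button { /* ... */ }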
89a6c5ce2769b0fd7008f95be839b20566009ed4 | 4,139 |
use ansi_term::Colour::Blue;
pub enum TextPart {
MatchedText(String),
Text(String),
}
impl TextPart {
/// The number of characters in this text part.
pub fn len(&self) -> usize {
self.text().chars().count()
}
/// The raw text
pub fn text(&self) -> &String {
match self {
TextPart::MatchedText(val) => val,
TextPart::Text(val) => val,
}
}
/// The total number of characters in all the text parts in the vector.
pub fn vec_len(parts: &[TextPart]) -> usize {
parts.iter().map(|part| part.len()).sum()
}
    /// Render the parts as a single string with the matched text highlighted
pub fn pretty_string(parts: &[TextPart]) -> String {
let mut out = String::new();
for part in parts {
match part {
TextPart::MatchedText(val) => out.push_str(Blue.paint(val).to_string().as_str()),
TextPart::Text(val) => out.push_str(val.as_str()),
};
}
out
}
}
pub struct MatchedBookmark {
pub url: Vec<TextPart>,
pub description: Vec<TextPart>,
pub tags: Vec<String>,
}
impl MatchedBookmark {
pub fn new_tags_only(
url: &str,
description: &str,
tags: Vec<String>,
) -> Self {
MatchedBookmark::new(
vec![TextPart::Text(String::from(url))],
vec![TextPart::Text(String::from(description))],
tags,
)
}
pub fn new(
url: Vec<TextPart>,
description: Vec<TextPart>,
mut tags: Vec<String>,
) -> Self {
// Sort tags case insensitively for output, but display in their original case
tags.sort_by_key(|a| a.to_lowercase());
Self {
url,
description,
tags,
}
}
/// Number of characters in URL (without formatting)
fn url_len(&self) -> usize {
TextPart::vec_len(&self.url)
}
/// Formatted URL for displaying on the terminal
fn url_pretty_string(&self) -> String {
TextPart::pretty_string(&self.url)
}
/// Number of characters in description (without formatting)
fn description_len(&self) -> usize {
TextPart::vec_len(&self.description)
}
/// Formatted description for displaying on the terminal
fn description_pretty_string(&self) -> String {
TextPart::pretty_string(&self.description)
}
/// Formatted tags for displaying on the terminal
fn tags_pretty_string(&self) -> String {
self.tags.join(" | ")
}
}
pub struct SearchResultOutput {
url_max: usize,
desc_max: usize,
lines: Vec<MatchedBookmark>,
}
impl SearchResultOutput {
pub fn new() -> Self {
Self {
url_max: 0,
desc_max: 0,
lines: Vec::new(),
}
}
pub fn add_matched_bookmark(
&mut self,
matched_bookmark: MatchedBookmark,
) {
let url_len = matched_bookmark.url_len();
if url_len > self.url_max {
self.url_max = url_len;
}
let desc_len = matched_bookmark.description_len();
if desc_len > self.desc_max {
self.desc_max = desc_len;
}
self.lines.push(matched_bookmark);
}
pub fn print(&self) {
for line in &self.lines {
// Can't use println formatting width because gets messed up by colored lines
println!(
"{}{} {}{} {}",
line.url_pretty_string(),
generate_padding(line.url_len(), self.url_max),
line.description_pretty_string(),
generate_padding(line.description_len(), self.desc_max),
line.tags_pretty_string(),
);
}
}
}
// Inspiration: https://docs.rs/crate/tabwriter/1.2.1/source/src/lib.rs
fn generate_padding(current_len: usize, pad_to: usize) -> String {
" ".repeat(pad_to - current_len)
}
#[cfg(test)]
mod tests {
use crate::cli_output::search_result_output::generate_padding;
#[test]
fn test_generate_padding() {
assert_eq!(generate_padding(7, 10), " ");
}
}
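// Added usage sketch: how the pieces above compose. Private field access works
// because this module lives in the same file; widths are character counts.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn columns_pad_to_widest_entry() {
        let mut out = SearchResultOutput::new();
        out.add_matched_bookmark(MatchedBookmark::new_tags_only(
            "https://example.com",
            "short",
            vec![String::from("rust")],
        ));
        out.add_matched_bookmark(MatchedBookmark::new_tags_only(
            "https://a.io",
            "a much longer description",
            vec![String::from("cli")],
        ));
        // The widest URL and description drive the padding for every printed line.
        assert_eq!(out.url_max, "https://example.com".chars().count());
        assert_eq!(out.desc_max, "a much longer description".chars().count());
    }
}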
295c9b7c7b2616e2df7949f6091178f4b6a0a141 | 5,250 |
// TryFrom is a simple and safe type conversion that may fail in a controlled way under some circumstances.
// Basically, this is the same as From. The main difference is that this should return a Result type
// instead of the target type itself.
// You can read more about it at https://doc.rust-lang.org/std/convert/trait.TryFrom.html
use std::convert::{TryFrom, TryInto};
use std::{error, fmt};
#[derive(Debug, PartialEq)]
struct Color {
red: u8,
green: u8,
blue: u8,
}
#[derive(Debug)]
struct ColorError;
impl fmt::Display for ColorError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "color component out of range (expected 0..=255)")
}
}
impl error::Error for ColorError {}
// Your task is to complete this implementation
// and return an Ok result of inner type Color.
// You need to create an implementation for a tuple of three integers,
// an array of three integers and a slice of integers.
//
// Note that the implementation for tuple and array will be checked at compile time,
// but the slice implementation needs to check the slice length!
// Also note that correct RGB color values must be integers in the 0..=255 range.
// Tuple implementation
impl TryFrom<(i16, i16, i16)> for Color {
type Error = Box<dyn error::Error>;
    fn try_from(tuple: (i16, i16, i16)) -> Result<Self, Self::Error> {
        let (r, g, b) = tuple;
        let in_range = |v: i16| (0..=255).contains(&v);
        if in_range(r) && in_range(g) && in_range(b) {
            Ok(Color {
                red: r as u8,
                green: g as u8,
                blue: b as u8,
            })
        } else {
            Err(Box::new(ColorError))
        }
    }
}
// Array implementation
impl TryFrom<[i16; 3]> for Color {
type Error = Box<dyn error::Error>;
fn try_from(arr: [i16; 3]) -> Result<Self, Self::Error> {
let [r, g, b] = arr;
Color::try_from((r, g, b))
}
}
// Slice implementation
impl TryFrom<&[i16]> for Color {
type Error = Box<dyn error::Error>;
fn try_from(slice: &[i16]) -> Result<Self, Self::Error> {
if slice.len() != 3 {
return Err(Box::new(ColorError));
}
Color::try_from((slice[0], slice[1], slice[2]))
}
}
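// Added note: the manual bounds checks above could also lean on the standard
// library, since `u8::try_from(i16)` already rejects anything outside 0..=255.
// Sketch only; it would still need boxing to match the error type used here:
//
//     let red = u8::try_from(r).map_err(|_| Box::new(ColorError))?;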
fn main() {
// Use the `from` function
let c1 = Color::try_from((183, 65, 14));
println!("{:?}", c1);
// Since From is implemented for Color, we should be able to use Into
let c2: Result<Color, _> = [183, 65, 14].try_into();
println!("{:?}", c2);
let v = vec![183, 65, 14];
// With slice we should use `from` function
let c3 = Color::try_from(&v[..]);
println!("{:?}", c3);
// or take slice within round brackets and use Into
let c4: Result<Color, _> = (&v[..]).try_into();
println!("{:?}", c4);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tuple_out_of_range_positive() {
assert!(Color::try_from((256, 1000, 10000)).is_err());
}
#[test]
fn test_tuple_out_of_range_negative() {
assert!(Color::try_from((-1, -10, -256)).is_err());
}
#[test]
fn test_tuple_sum() {
assert!(Color::try_from((-1, 255, 255)).is_err());
}
#[test]
fn test_tuple_correct() {
let c: Result<Color, _> = (183, 65, 14).try_into();
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_array_out_of_range_positive() {
let c: Result<Color, _> = [1000, 10000, 256].try_into();
assert!(c.is_err());
}
#[test]
fn test_array_out_of_range_negative() {
let c: Result<Color, _> = [-10, -256, -1].try_into();
assert!(c.is_err());
}
#[test]
fn test_array_sum() {
let c: Result<Color, _> = [-1, 255, 255].try_into();
assert!(c.is_err());
}
#[test]
fn test_array_correct() {
let c: Result<Color, _> = [183, 65, 14].try_into();
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_slice_out_of_range_positive() {
let arr = [10000, 256, 1000];
assert!(Color::try_from(&arr[..]).is_err());
}
#[test]
fn test_slice_out_of_range_negative() {
let arr = [-256, -1, -10];
assert!(Color::try_from(&arr[..]).is_err());
}
#[test]
fn test_slice_sum() {
let arr = [-1, 255, 255];
assert!(Color::try_from(&arr[..]).is_err());
}
#[test]
fn test_slice_correct() {
let v = vec![183, 65, 14];
let c: Result<Color, _> = Color::try_from(&v[..]);
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_slice_excess_length() {
let v = vec![0, 0, 0, 0];
assert!(Color::try_from(&v[..]).is_err());
}
#[test]
fn test_slice_insufficient_length() {
let v = vec![0, 0];
assert!(Color::try_from(&v[..]).is_err());
}
}
e45d9d6055520d5c7f509c2e7debbb62d649dad1 | 605 |
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::deque;
use std::deque::Deque;
pub fn main() {
let Q = deque::create();
Q.add_back(10);
}
ccad9797a39ca72f718480e653899027e02335c2 | 13,504 |
use super::utils::sign_send_instructions;
use crate::common::context::Context;
use audaces_protocol::{
instruction::{
add_budget, add_instance, add_page, close_account, close_position, collect_garbage,
crank_funding, crank_liquidation, create_market, extract_funding, increase_position,
open_position, rebalance, transfer_position, transfer_user_account, withdraw_budget,
},
instruction::{InstanceContext, PositionInfo},
state::PositionType,
};
use solana_program::{pubkey::Pubkey, system_instruction::create_account};
use solana_sdk::{signature::Keypair, signer::Signer, transport::TransportError};
impl Context {
pub async fn create_market(
&mut self,
market_symbol: String,
initial_v_pc_amount: u64,
coin_decimals: u8,
quote_decimals: u8,
) -> Result<(), TransportError> {
let create_market_instruction = create_market(
&self.market_ctx,
market_symbol,
initial_v_pc_amount,
coin_decimals,
quote_decimals,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![create_market_instruction],
vec![],
)
.await
}
pub async fn add_instance(
&mut self,
nb_pages_per_instance: u8,
space_per_page: u64,
) -> Result<(), TransportError> {
let instance_keypair = Keypair::new();
let mut instructions = vec![create_account(
&self.prg_test_ctx.payer.pubkey(),
&instance_keypair.pubkey(),
1_000_000,
1_000_000,
&self.market_ctx.audaces_protocol_program_id,
)];
let mut signers = vec![];
let mut signers_ref = vec![&instance_keypair];
let mut pages_pubkeys = vec![];
for _ in 0..nb_pages_per_instance {
let page_keypair = Keypair::new();
pages_pubkeys.push(page_keypair.pubkey());
instructions.push(create_account(
&self.prg_test_ctx.payer.pubkey(),
&page_keypair.pubkey(),
1_000_000,
space_per_page,
&self.market_ctx.audaces_protocol_program_id,
));
signers.push(page_keypair);
}
instructions.push(add_instance(
&self.market_ctx,
instance_keypair.pubkey(),
&pages_pubkeys,
));
signers_ref.push(&self.test_ctx.market_admin_keypair);
self.market_ctx.instances.push(InstanceContext {
instance_account: instance_keypair.pubkey(),
memory_pages: pages_pubkeys,
});
sign_send_instructions(
&mut self.prg_test_ctx,
instructions,
signers.iter().chain(signers_ref).collect::<Vec<&Keypair>>(),
)
.await
}
pub async fn add_budget(
&mut self,
amount: u64,
user_account_index: usize,
) -> Result<(), TransportError> {
let add_budget_instruction = add_budget(
&self.market_ctx,
amount,
self.user_ctx.owner_account.pubkey(),
self.user_ctx.usdc_account,
self.user_ctx.user_accounts[user_account_index],
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![add_budget_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
pub async fn withdraw_budget(
&mut self,
amount: u64,
user_account_index: usize,
) -> Result<(), TransportError> {
let withdraw_budget_instruction = withdraw_budget(
&self.market_ctx,
amount,
self.user_ctx.usdc_account,
self.user_ctx.owner_account.pubkey(),
self.user_ctx.user_accounts[user_account_index],
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![withdraw_budget_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
pub async fn open_position(
&mut self,
side: PositionType,
collateral: u64,
leverage: u64,
instance_index: u8,
user_account_index: usize,
) -> Result<(), TransportError> {
let open_position_instruction = open_position(
&self.market_ctx,
&PositionInfo {
user_account: self.user_ctx.user_accounts[user_account_index],
user_account_owner: self.user_ctx.owner_account.pubkey(),
instance_index,
side,
},
collateral,
leverage,
0,
u64::MAX,
None,
None,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![open_position_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
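    // Added usage sketch (illustrative; `PositionType::Long` is assumed from the
    // `PositionType` import above — the collateral/leverage/index arguments mirror
    // this method's signature):
    //
    //     ctx.create_market("BTC/USDC".to_string(), 1_000_000, 6, 6).await?;
    //     ctx.add_instance(4, 1_000_000).await?;
    //     ctx.add_budget(10_000_000, 0).await?;
    //     ctx.open_position(PositionType::Long, 1_000_000, 5, 0, 0).await?;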
pub async fn increase_position(
&mut self,
collateral: u64,
leverage: u64,
position_index: u16,
instance_index: u8,
user_account_index: usize,
) -> Result<(), TransportError> {
let increase_position_instruction = increase_position(
&self.market_ctx,
collateral,
leverage,
instance_index,
position_index,
self.user_ctx.owner_account.pubkey(),
self.user_ctx.user_accounts[user_account_index],
0,
u64::MAX,
None,
None,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![increase_position_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
pub async fn close_position(
&mut self,
closing_collateral: u64,
closing_v_coin: u64,
position_index: u16,
user_account_index: usize,
) -> Result<(), TransportError> {
let position = self
.get_position(position_index, user_account_index)
.await
.unwrap();
let close_position_instruction = close_position(
&self.market_ctx,
&PositionInfo {
user_account: self.user_ctx.user_accounts[user_account_index],
user_account_owner: self.user_ctx.owner_account.pubkey(),
instance_index: position.instance_index,
side: position.side,
},
closing_collateral,
closing_v_coin,
position_index,
0,
u64::MAX,
None,
None,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![close_position_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
pub async fn liquidate(&mut self, instance_index: u8) -> Result<(), TransportError> {
let liquidate_instruction =
crank_liquidation(&self.market_ctx, instance_index, self.user_ctx.usdc_account);
sign_send_instructions(&mut self.prg_test_ctx, vec![liquidate_instruction], vec![]).await
}
pub async fn collect_garbage(
&mut self,
instance_index: u8,
max_iterations: u64,
) -> Result<(), TransportError> {
let collect_garbage_instruction = collect_garbage(
&self.market_ctx,
instance_index,
max_iterations,
self.user_ctx.usdc_account,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![collect_garbage_instruction],
vec![],
)
.await
}
pub async fn crank_funding(&mut self) -> Result<(), TransportError> {
let crank_funding_instruction = crank_funding(&self.market_ctx);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![crank_funding_instruction],
vec![],
)
.await
}
pub async fn extract_funding(
&mut self,
instance_index: u8,
user_account_index: usize,
) -> Result<(), TransportError> {
let crank_funding_instruction = extract_funding(
&self.market_ctx,
instance_index,
self.user_ctx.user_accounts[user_account_index],
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![crank_funding_instruction],
vec![],
)
.await
}
pub async fn close_account(
&mut self,
lamports_target: Pubkey,
user_account_index: usize,
) -> Result<(), TransportError> {
let close_account_instruction = close_account(
&self.market_ctx,
self.user_ctx.user_accounts[user_account_index],
self.user_ctx.owner_account.pubkey(),
lamports_target,
);
sign_send_instructions(
&mut self.prg_test_ctx,
vec![close_account_instruction],
vec![&self.user_ctx.owner_account],
)
.await
}
pub async fn add_page(&mut self, instance_index: u8, space: u64) -> Result<(), TransportError> {
let page_keypair = Keypair::new();
let instructions = vec![
create_account(
&self.prg_test_ctx.payer.pubkey(),
&page_keypair.pubkey(),
1_000_000,
space,
&self.market_ctx.audaces_protocol_program_id,
),
add_page(&self.market_ctx, instance_index, page_keypair.pubkey()),
];
let signers = vec![&page_keypair, &self.test_ctx.market_admin_keypair];
self.market_ctx.instances[instance_index as usize]
.memory_pages
.push(page_keypair.pubkey());
sign_send_instructions(&mut self.prg_test_ctx, instructions, signers).await
}
pub async fn rebalance(
&mut self,
instance_index: u8,
collateral: u64,
user_account_index: usize,
) -> Result<(), TransportError> {
let instructions = vec![rebalance(
&self.market_ctx,
self.user_ctx.user_accounts[user_account_index],
self.user_ctx.owner_account.pubkey(),
instance_index,
collateral,
)];
let signers = vec![
&self.user_ctx.owner_account,
&self.test_ctx.market_admin_keypair,
];
sign_send_instructions(&mut self.prg_test_ctx, instructions, signers).await
}
pub async fn transfer_user_account(
&mut self,
new_user_account_owner: Keypair,
user_account_index: usize,
) -> Result<(), TransportError> {
let instructions = vec![transfer_user_account(
&self.market_ctx,
self.user_ctx.user_accounts[user_account_index],
self.user_ctx.owner_account.pubkey(),
new_user_account_owner.pubkey(),
)];
let signers = vec![&self.user_ctx.owner_account];
let r = sign_send_instructions(&mut self.prg_test_ctx, instructions, signers).await;
self.user_ctx.owner_account = new_user_account_owner;
        r
}
pub async fn transfer_position_to_new_user(
&mut self,
position_index: u16,
user_account_index: usize,
) -> Result<(), TransportError> {
let new_user_account = Keypair::new();
let create_instruction = vec![create_account(
&self.prg_test_ctx.payer.pubkey(),
&new_user_account.pubkey(),
1_000_000,
1_000_000,
&self.market_ctx.audaces_protocol_program_id,
)];
let signers = vec![&new_user_account];
sign_send_instructions(&mut self.prg_test_ctx, create_instruction, signers)
.await
.unwrap();
let transfer_instruction = vec![transfer_position(
&self.market_ctx,
position_index,
self.user_ctx.user_accounts[user_account_index],
self.user_ctx.owner_account.pubkey(),
new_user_account.pubkey(),
self.user_ctx.owner_account.pubkey(),
)];
self.user_ctx.user_accounts.push(new_user_account.pubkey());
self.add_budget(10_000_000, self.user_ctx.user_accounts.len() - 1)
.await
.unwrap();
let signers = vec![&self.user_ctx.owner_account];
        sign_send_instructions(&mut self.prg_test_ctx, transfer_instruction, signers).await
}
pub async fn create_user_accounts(
&mut self,
nb_new_accounts: usize,
) -> Result<(), TransportError> {
let mut instructions = vec![];
let mut signers: Vec<Keypair> = vec![];
for _ in 0..nb_new_accounts {
let new_user_account = Keypair::new();
instructions.push(create_account(
&self.prg_test_ctx.payer.pubkey(),
&new_user_account.pubkey(),
1_000_000,
1_000_000,
&self.market_ctx.audaces_protocol_program_id,
));
signers.push(new_user_account);
}
let signers_ref: Vec<&Keypair> = signers.iter().collect();
self.user_ctx
.user_accounts
.append(&mut signers.iter().map(|k| k.pubkey()).collect());
sign_send_instructions(&mut self.prg_test_ctx, instructions, signers_ref).await
}
}
| 32.383693 | 100 | 0.578866 |
11bf3fd6faa7263342cca502fd026a6e37b50f39 | 2,918 | use std::env;
use std::fs::read_to_string;
use std::path::Path;
use std::process::Command;
use anyhow::{anyhow, ensure, Context, Result};
use ignore::Walk;
use serde_derive::Deserialize;
use toml;
use tr::tr;
fn main() {
    //TODO: building of the wasm frontend is currently disabled; fix and re-enable it
// build_wasm_frontend().expect("error while building the wasm front-end");
}
#[derive(Deserialize)]
struct BuildConfig {}
fn read_toml_config() -> Result<BuildConfig> {
let toml_path = Path::new("build.toml");
    let toml_str = read_to_string(toml_path).context("trouble reading build.toml")?;
    let config: BuildConfig =
        toml::from_str(toml_str.as_ref()).context("trouble parsing build.toml")?;
    println!("cargo:rerun-if-changed=build.toml");
Ok(config)
}
/// Build the wasm component used for the front end of the website.
/// Requires `wasm-pack` CLI, `xtr`, and GNU Gettext CLI tools
/// `msginit`, `msgfmt`, `msgmerge` and `msgcat` to be present in your
/// system path.
///
/// Runs the command `wasm-pack build --target web --out-dir
/// ../public/js/gui`
fn build_wasm_frontend() -> Result<()> {
let config = read_toml_config()?;
println!("cargo:rerun-if-changed=Build.toml");
ensure_gui_watch_rerun()?;
build_wasm()?;
    // Enable the panic below when debugging: stdout from this build script
    // isn't shown, even with `cargo build -vv`.
// panic!("debugging");
Ok(())
}
/// Ensure that this script runs every time something within the gui
/// crate changes.
fn ensure_gui_watch_rerun() -> Result<()> {
println!("cargo:rerun-if-changed=gui/Cargo.lock");
for result in Walk::new("gui/") {
match result {
Ok(entry) => {
println!("cargo:rerun-if-changed={}", entry.path().display());
}
Err(err) => return Err(anyhow!("error walking directory gui/: {}", err)),
}
}
Ok(())
}
fn run_command_and_check_success(command_name: &str, mut command: Command) -> Result<()> {
let output = command
.spawn()
.with_context(|| tr!("the {0} command was unable to start", command_name))?
.wait_with_output()
.with_context(|| {
tr!(
"the {0} command had a problem waiting for output",
command_name
)
})?;
ensure!(
output.status.success(),
tr!(
"the {0} command reported that it was unsuccessful",
command_name
)
);
Ok(())
}
fn build_wasm() -> Result<()> {
let profile: String = env::var("PROFILE").unwrap();
let mut wasm_pack = Command::new("wasm-pack");
wasm_pack.current_dir("./gui");
wasm_pack.args(&["build", "--target", "web", "--out-dir", "../public/js/gui"]);
if profile == "debug" {
wasm_pack.arg("--dev");
}
run_command_and_check_success("wasm-pack", wasm_pack)?;
Ok(())
}
| 29.474747 | 90 | 0.609664 |
e6f444591d1059c9f4cf9d3aaea468a9d64bdce1 | 5,184 | use binprot::{BinProtRead, BinProtWrite};
use std::collections::BTreeMap;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;
use tokio::sync::{oneshot, Mutex};
use crate::error::Error;
use crate::protocol::*;
use crate::read_write::*;
type OneShots = BTreeMap<i64, oneshot::Sender<Result<ClientRpcResult, Error>>>;
pub struct RpcClient {
w: Arc<Mutex<WriteOrClosed>>,
buf: Vec<u8>,
id_and_oneshots: Arc<Mutex<(i64, OneShots)>>,
}
impl RpcClient {
pub async fn new<A: tokio::net::ToSocketAddrs>(addr: A) -> Result<Self, Error> {
let stream = TcpStream::connect(addr).await?;
let (mut r, w) = stream.into_split();
let w = Arc::new(Mutex::new(WriteOrClosed::Write(w)));
let mut buf = vec![0u8; 128];
let handshake: Handshake = read_bin_prot(&mut r, &mut buf).await?;
tracing::debug!("Handshake: {:?}", handshake);
write_bin_prot(&w, &Handshake(vec![RPC_MAGIC_NUMBER, 1]), &mut buf).await?;
let oneshots: OneShots = BTreeMap::new();
let id_and_oneshots = Arc::new(Mutex::new((0i64, oneshots)));
let _heartbeat_done = spawn_heartbeat_thread(w.clone());
let id_and_os = id_and_oneshots.clone();
tokio::spawn(async move {
let mut buf = vec![0u8; 128];
let mut recv_bytes = [0u8; 8];
loop {
if let Err(err) = r.read_exact(&mut recv_bytes).await {
tracing::error!("socket read error: {:?}", err);
break;
};
let recv_len = i64::from_le_bytes(recv_bytes);
buf.resize(recv_len as usize, 0u8);
if let Err(err) = r.read_exact(&mut buf).await {
tracing::error!("socket read error: {:?}", err);
break;
}
let msg = match ClientMessage::<String, ()>::binprot_read(&mut buf.as_slice()) {
Err(err) => {
tracing::error!("unexpected message format: {:?}", err);
break;
}
Ok(msg) => msg,
};
tracing::debug!("client received: {:?}", msg);
match msg {
ClientMessage::Heartbeat => {}
ClientMessage::Query(_) => {
tracing::error!("client received an unexpected query");
}
ClientMessage::ClientResponse(resp) => {
let mut id_and_oneshots = id_and_os.lock().await;
let (_id, oneshots) = &mut *id_and_oneshots;
match oneshots.remove(&resp.id) {
None => {
tracing::error!("client received an unexpected id: {}", resp.id);
}
Some(tx) => {
if let Err(err) = tx.send(Ok(resp.data)) {
tracing::error!("client tx error: {:?}", err)
}
}
}
}
}
}
let mut id_and_oneshots = id_and_os.lock().await;
let id = id_and_oneshots.0;
let (_id, oneshots) = std::mem::replace(&mut *id_and_oneshots, (id, BTreeMap::new()));
for (_key, tx) in oneshots.into_iter() {
let error = std::io::Error::new(
std::io::ErrorKind::ConnectionAborted,
"connection closed by server",
);
if let Err(err) = tx.send(Err(error.into())) {
tracing::error!("client tx error: {:?}", err)
}
}
});
Ok(RpcClient { w, buf, id_and_oneshots })
}
    // Registers a fresh query id and returns both the id and the oneshot
    // receiver on which the matching response will be delivered.
async fn register_new_id(
&mut self,
) -> (i64, oneshot::Receiver<Result<ClientRpcResult, Error>>) {
let mut id_and_oneshots = self.id_and_oneshots.lock().await;
let id_and_oneshots = &mut *id_and_oneshots;
let (tx, rx) = oneshot::channel();
let fresh_id = id_and_oneshots.0;
id_and_oneshots.1.insert(fresh_id, tx);
id_and_oneshots.0 += 1;
(fresh_id, rx)
}
pub async fn dispatch<Q, R>(&mut self, rpc_tag: &str, version: i64, q: Q) -> Result<R, Error>
where
Q: BinProtWrite,
R: BinProtRead + Send + Sync,
{
let (id, rx) = self.register_new_id().await;
let message = ClientMessage::Query(Query::<&str, Q> {
rpc_tag,
version,
id,
data: binprot::WithLen(q),
});
write_bin_prot(&self.w, &message, &mut self.buf).await?;
let rpc_result = rx.await??;
match rpc_result {
ClientRpcResult::Ok(buffer_with_len) => {
let result = R::binprot_read(&mut buffer_with_len.0.as_slice())?;
Ok(result)
}
ClientRpcResult::Error(err) => Err(err.into()),
}
}
}
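// Illustrative usage sketch (not part of the original source). It assumes a
// bin_prot RPC server reachable on localhost that exposes an RPC tagged "echo"
// at version 1, taking an i64 and returning an i64; the address, tag, version,
// and types are placeholders.
#[allow(dead_code)]
async fn example_echo_round_trip() -> Result<i64, Error> {
    let mut client = RpcClient::new("127.0.0.1:8080").await?;
    // dispatch() registers a fresh query id, writes the framed query to the
    // socket, and awaits the matching response on a oneshot channel.
    client.dispatch::<i64, i64>("echo", 1, 42).await
}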
| 39.572519 | 98 | 0.494213 |
4b489de8da780c7db2f6360b8c9ca6953cc10b06 | 834 | #[doc = "Writer for register CAU_DIRECT14"]
pub type W = crate::W<u32, super::CAU_DIRECT14>;
#[doc = "Register CAU_DIRECT14 `reset()`'s with value 0"]
impl crate::ResetValue for super::CAU_DIRECT14 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Write proxy for field `CAU_DIRECT14`"]
pub struct CAU_DIRECT14_W<'a> {
w: &'a mut W,
}
impl<'a> CAU_DIRECT14_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
self.w
}
}
impl W {
#[doc = "Bits 0:31 - Direct register 14"]
#[inline(always)]
pub fn cau_direct14(&mut self) -> CAU_DIRECT14_W {
CAU_DIRECT14_W { w: self }
}
}
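// Illustrative usage sketch (not part of the original source): with the owning
// CAU peripheral in scope (binding name assumed), the svd2rust write proxy
// above lets the whole 32-bit direct register be written in one go:
//
//     cau.cau_direct14.write(|w| unsafe { w.cau_direct14().bits(0xdead_beef) });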
| 27.8 | 84 | 0.597122 |
ef1b976ae333f304fcfb9ebac9d30ac554ccd49a | 822 | use std::fmt::Debug;
fn elided(x: &i32) -> impl Copy { x }
//~^ ERROR explicit lifetime required in the type of `x` [E0621]
fn explicit<'a>(x: &'a i32) -> impl Copy { x }
//~^ ERROR cannot infer an appropriate lifetime
trait LifetimeTrait<'a> {}
impl<'a> LifetimeTrait<'a> for &'a i32 {}
fn with_bound<'a>(x: &'a i32) -> impl LifetimeTrait<'a> + 'static { x }
//~^ ERROR cannot infer an appropriate lifetime
// Tests that a closure type containing 'b cannot be returned from a type where
// only 'a was expected.
fn move_lifetime_into_fn<'a, 'b>(x: &'a u32, y: &'b u32) -> impl Fn(&'a u32) {
//~^ ERROR lifetime mismatch
move |_| println!("{}", y)
}
fn ty_param_wont_outlive_static<T:Debug>(x: T) -> impl Debug + 'static {
//~^ ERROR the parameter type `T` may not live long enough
x
}
fn main() {}
| 29.357143 | 79 | 0.641119 |
91ceb60a4dc53a55f6af75c4d4645aecc8d648dc | 129,152 | use crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
};
use chrono::prelude::*;
use solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db};
use solana_measure::measure::Measure;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
vote_account::ArcVoteAccount,
};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
hash::Hash,
instruction::Instruction,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
slot_history::{Check, SlotHistory},
};
use solana_vote_program::{
vote_instruction,
vote_state::{BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY},
};
use std::{
cmp::Ordering,
collections::{HashMap, HashSet},
fs::{self, File},
io::BufReader,
ops::{
Bound::{Included, Unbounded},
Deref,
},
path::{Path, PathBuf},
};
use thiserror::Error;
#[derive(PartialEq, Clone, Debug, AbiExample)]
pub enum SwitchForkDecision {
SwitchProof(Hash),
SameFork,
FailedSwitchThreshold(u64, u64),
FailedSwitchDuplicateRollback(Slot),
}
impl SwitchForkDecision {
pub fn to_vote_instruction(
&self,
vote: Vote,
vote_account_pubkey: &Pubkey,
authorized_voter_pubkey: &Pubkey,
) -> Option<Instruction> {
match self {
SwitchForkDecision::FailedSwitchThreshold(_, total_stake) => {
assert_ne!(*total_stake, 0);
None
}
SwitchForkDecision::FailedSwitchDuplicateRollback(_) => None,
SwitchForkDecision::SameFork => Some(vote_instruction::vote(
vote_account_pubkey,
authorized_voter_pubkey,
vote,
)),
SwitchForkDecision::SwitchProof(switch_proof_hash) => {
Some(vote_instruction::vote_switch(
vote_account_pubkey,
authorized_voter_pubkey,
vote,
*switch_proof_hash,
))
}
}
}
pub fn can_vote(&self) -> bool {
match self {
SwitchForkDecision::FailedSwitchThreshold(_, _) => false,
SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false,
SwitchForkDecision::SameFork => true,
SwitchForkDecision::SwitchProof(_) => true,
}
}
}
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
pub type Result<T> = std::result::Result<T, TowerError>;
pub type Stake = u64;
pub type VotedStakes = HashMap<Slot, Stake>;
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
// Silences the "bank_weight is never read" lint.
#[allow(dead_code)]
pub(crate) struct ComputedBankState {
pub voted_stakes: VotedStakes,
pub total_stake: Stake,
pub bank_weight: u128,
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
// keyed by end of the range
pub lockout_intervals: LockoutIntervals,
pub my_latest_landed_vote: Option<Slot>,
}
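// Illustrative sketch (not part of the original source): how a single vote maps
// into `lockout_intervals`. A lockout lasts 2^confirmation_count slots, so a
// vote on slot `s` is locked out through `s + 2^confirmation_count`, and the
// entry is keyed by that interval end.
#[test]
fn example_lockout_interval_end() {
    let vote = Lockout {
        slot: 10,
        confirmation_count: 3,
    };
    // lockout() == 2^3 == 8 slots, so the interval is [10, 18], keyed by 18.
    assert_eq!(vote.last_locked_out_slot(), 18);
}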
#[frozen_abi(digest = "GMs1FxKteU7K4ZFRofMBqNhBpM4xkPVxfYod6R8DQmpT")]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct Tower {
node_pubkey: Pubkey,
threshold_depth: usize,
threshold_size: f64,
vote_state: VoteState,
last_vote: Vote,
#[serde(skip)]
// The blockhash used in the last vote transaction, may or may not equal the
// blockhash of the voted block itself, depending if the vote slot was refreshed.
// For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in
// block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5.
last_vote_tx_blockhash: Hash,
last_timestamp: BlockTimestamp,
#[serde(skip)]
pub(crate) ledger_path: PathBuf,
#[serde(skip)]
path: PathBuf,
#[serde(skip)]
tmp_path: PathBuf, // used before atomic fs::rename()
#[serde(skip)]
    // Restored last voted slot which cannot be found in SlotHistory at replayed root
    // (This is a special field for slashing-free validator restart with edge cases).
    // This could be emptied after some time, but is left intact indefinitely for
    // simplicity of implementation.
    // Further, a stray slot can be stale or not; `stale` here means that the given
    // bank_forks (=~ ledger) lacks the slot.
stray_restored_slot: Option<Slot>,
#[serde(skip)]
pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
}
impl Default for Tower {
fn default() -> Self {
let mut tower = Self {
node_pubkey: Pubkey::default(),
threshold_depth: VOTE_THRESHOLD_DEPTH,
threshold_size: VOTE_THRESHOLD_SIZE,
vote_state: VoteState::default(),
last_vote: Vote::default(),
last_timestamp: BlockTimestamp::default(),
last_vote_tx_blockhash: Hash::default(),
ledger_path: PathBuf::default(),
path: PathBuf::default(),
tmp_path: PathBuf::default(),
stray_restored_slot: Option::default(),
last_switch_threshold_check: Option::default(),
};
// VoteState::root_slot is ensured to be Some in Tower
tower.vote_state.root_slot = Some(Slot::default());
tower
}
}
impl Tower {
pub fn new(
node_pubkey: &Pubkey,
vote_account_pubkey: &Pubkey,
root: Slot,
bank: &Bank,
ledger_path: &Path,
) -> Self {
let mut tower = Tower {
ledger_path: ledger_path.into(),
..Tower::default()
};
tower.set_identity(*node_pubkey);
tower.initialize_lockouts_from_bank(vote_account_pubkey, root, bank);
tower
}
fn set_identity(&mut self, node_pubkey: Pubkey) {
let path = Self::get_filename(&self.ledger_path, &node_pubkey);
let tmp_path = Self::get_tmp_filename(&path);
self.node_pubkey = node_pubkey;
self.path = path;
self.tmp_path = tmp_path;
}
#[cfg(test)]
pub fn new_for_tests(threshold_depth: usize, threshold_size: f64) -> Self {
Self {
threshold_depth,
threshold_size,
..Tower::default()
}
}
pub fn new_from_bankforks(
bank_forks: &BankForks,
ledger_path: &Path,
node_pubkey: &Pubkey,
vote_account: &Pubkey,
) -> Self {
let root_bank = bank_forks.root_bank();
let (_progress, heaviest_subtree_fork_choice) =
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
root_bank.deref(),
bank_forks.frozen_banks().values().cloned().collect(),
node_pubkey,
vote_account,
);
let root = root_bank.slot();
let (best_slot, best_hash) = heaviest_subtree_fork_choice.best_overall_slot();
let heaviest_bank = bank_forks
.get_with_checked_hash((best_slot, best_hash))
.expect(
"The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
)
.clone();
Self::new(node_pubkey, vote_account, root, &heaviest_bank, ledger_path)
}
pub(crate) fn collect_vote_lockouts<F>(
vote_account_pubkey: &Pubkey,
bank_slot: Slot,
vote_accounts: F,
ancestors: &HashMap<Slot, HashSet<Slot>>,
get_frozen_hash: impl Fn(Slot) -> Option<Hash>,
latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
) -> ComputedBankState
where
F: IntoIterator<Item = (Pubkey, (u64, ArcVoteAccount))>,
{
let mut vote_slots = HashSet::new();
let mut voted_stakes = HashMap::new();
let mut total_stake = 0;
let mut bank_weight = 0;
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
// keyed by end of the range
let mut lockout_intervals = LockoutIntervals::new();
let mut my_latest_landed_vote = None;
for (key, (voted_stake, account)) in vote_accounts {
if voted_stake == 0 {
continue;
}
trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake);
let mut vote_state = match account.vote_state().as_ref() {
Err(_) => {
datapoint_warn!(
"tower_warn",
(
"warn",
format!("Unable to get vote_state from account {}", key),
String
),
);
continue;
}
Ok(vote_state) => vote_state.clone(),
};
for vote in &vote_state.votes {
lockout_intervals
.entry(vote.last_locked_out_slot())
.or_insert_with(Vec::new)
.push((vote.slot, key));
}
if key == *vote_account_pubkey {
my_latest_landed_vote = vote_state.nth_recent_vote(0).map(|v| v.slot);
debug!("vote state {:?}", vote_state);
debug!(
"observed slot {}",
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64
);
debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
datapoint_info!(
"tower-observed",
(
"slot",
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0),
i64
),
("root", vote_state.root_slot.unwrap_or(0), i64)
);
}
let start_root = vote_state.root_slot;
// Add the last vote to update the `heaviest_subtree_fork_choice`
if let Some(last_landed_voted_slot) = vote_state.last_voted_slot() {
latest_validator_votes_for_frozen_banks.check_add_vote(
key,
last_landed_voted_slot,
get_frozen_hash(last_landed_voted_slot),
true,
);
}
vote_state.process_slot_vote_unchecked(bank_slot);
for vote in &vote_state.votes {
bank_weight += vote.lockout() as u128 * voted_stake as u128;
vote_slots.insert(vote.slot);
}
if start_root != vote_state.root_slot {
if let Some(root) = start_root {
let vote = Lockout {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
trace!("ROOT: {}", vote.slot);
bank_weight += vote.lockout() as u128 * voted_stake as u128;
vote_slots.insert(vote.slot);
}
}
if let Some(root) = vote_state.root_slot {
let vote = Lockout {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
bank_weight += vote.lockout() as u128 * voted_stake as u128;
vote_slots.insert(vote.slot);
}
// The last vote in the vote stack is a simulated vote on bank_slot, which
            // we added to the vote stack earlier in this function by calling
            // process_slot_vote_unchecked().
            // We don't want to update the ancestor stakes of this vote b/c it does not
// represent an actual vote by the validator.
// Note: It should not be possible for any vote state in this bank to have
// a vote for a slot >= bank_slot, so we are guaranteed that the last vote in
// this vote stack is the simulated vote, so this fetch should be sufficient
// to find the last unsimulated vote.
assert_eq!(
vote_state.nth_recent_vote(0).map(|l| l.slot),
Some(bank_slot)
);
if let Some(vote) = vote_state.nth_recent_vote(1) {
// Update all the parents of this last vote with the stake of this vote account
Self::update_ancestor_voted_stakes(
&mut voted_stakes,
vote.slot,
voted_stake,
ancestors,
);
}
total_stake += voted_stake;
}
// TODO: populate_ancestor_voted_stakes only adds zeros. Comment why
// that is necessary (if so).
Self::populate_ancestor_voted_stakes(&mut voted_stakes, vote_slots, ancestors);
ComputedBankState {
voted_stakes,
total_stake,
bank_weight,
lockout_intervals,
my_latest_landed_vote,
}
}
pub fn is_slot_confirmed(
&self,
slot: Slot,
voted_stakes: &VotedStakes,
total_stake: Stake,
) -> bool {
voted_stakes
.get(&slot)
.map(|stake| (*stake as f64 / total_stake as f64) > self.threshold_size)
.unwrap_or(false)
}
pub fn tower_slots(&self) -> Vec<Slot> {
self.vote_state.tower()
}
pub fn last_vote_tx_blockhash(&self) -> Hash {
self.last_vote_tx_blockhash
}
pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) {
self.last_vote_tx_blockhash = new_vote_tx_blockhash;
}
fn apply_vote_and_generate_vote_diff(
local_vote_state: &mut VoteState,
slot: Slot,
hash: Hash,
last_voted_slot_in_bank: Option<Slot>,
) -> Vote {
let vote = Vote::new(vec![slot], hash);
local_vote_state.process_vote_unchecked(&vote);
let slots = if let Some(last_voted_slot_in_bank) = last_voted_slot_in_bank {
local_vote_state
.votes
.iter()
.map(|v| v.slot)
.skip_while(|s| *s <= last_voted_slot_in_bank)
.collect()
} else {
local_vote_state.votes.iter().map(|v| v.slot).collect()
};
trace!(
"new vote with {:?} {:?} {:?}",
last_voted_slot_in_bank,
slots,
local_vote_state.votes
);
Vote::new(slots, hash)
}
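    // Illustrative sketch (not part of the original source): if the local tower
    // already holds votes for slots [1, 2] and the bank last landed our vote for
    // slot 2, applying a vote for slot 3 above yields a diff vote containing
    // only [3]; slots the bank already knows about are skipped. A runnable
    // version of this example, example_vote_diff_skips_landed_slots(), appears
    // after this impl block.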
pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
let (_stake, vote_account) = bank.get_vote_account(vote_account_pubkey)?;
        vote_account.vote_state().as_ref().ok()?.last_voted_slot()
}
pub fn record_bank_vote(&mut self, bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
let last_voted_slot_in_bank = Self::last_voted_slot_in_bank(bank, vote_account_pubkey);
// Returns the new root if one is made after applying a vote for the given bank to
// `self.vote_state`
self.record_bank_vote_and_update_lockouts(bank.slot(), bank.hash(), last_voted_slot_in_bank)
}
fn record_bank_vote_and_update_lockouts(
&mut self,
vote_slot: Slot,
vote_hash: Hash,
last_voted_slot_in_bank: Option<Slot>,
) -> Option<Slot> {
trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
let old_root = self.root();
let mut new_vote = Self::apply_vote_and_generate_vote_diff(
&mut self.vote_state,
vote_slot,
vote_hash,
last_voted_slot_in_bank,
);
new_vote.timestamp = self.maybe_timestamp(self.last_vote.last_voted_slot().unwrap_or(0));
self.last_vote = new_vote;
let new_root = self.root();
datapoint_info!(
"tower-vote",
("latest", vote_slot, i64),
("root", new_root, i64)
);
if old_root != new_root {
Some(new_root)
} else {
None
}
}
#[cfg(test)]
pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
self.record_bank_vote_and_update_lockouts(slot, hash, self.last_voted_slot())
}
pub fn last_voted_slot(&self) -> Option<Slot> {
self.last_vote.last_voted_slot()
}
pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> {
self.last_vote.last_voted_slot_hash()
}
pub fn stray_restored_slot(&self) -> Option<Slot> {
self.stray_restored_slot
}
pub fn last_vote(&mut self) -> Vote {
self.last_vote.clone()
}
fn maybe_timestamp(&mut self, current_slot: Slot) -> Option<UnixTimestamp> {
if current_slot > self.last_timestamp.slot
|| self.last_timestamp.slot == 0 && current_slot == self.last_timestamp.slot
{
let timestamp = Utc::now().timestamp();
if timestamp >= self.last_timestamp.timestamp {
self.last_timestamp = BlockTimestamp {
slot: current_slot,
timestamp,
};
return Some(timestamp);
}
}
None
}
    // The root may be forcibly set by an arbitrary replay root slot, for example from a root
    // after replaying a snapshot.
    // Also, tower.root() can't be None; initialize_lockouts() ensures that.
    // Conceptually, every tower must have been constructed from a concrete starting point,
    // which establishes the origin of trust (i.e. root), whether booting from genesis (slot 0)
    // or from a snapshot (slot N). In other words, there should be no possibility that a Tower
    // lacks a root, unlike young vote accounts.
pub fn root(&self) -> Slot {
self.vote_state.root_slot.unwrap()
}
// a slot is recent if it's newer than the last vote we have
pub fn is_recent(&self, slot: Slot) -> bool {
if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
if slot <= last_voted_slot {
return false;
}
}
true
}
pub fn has_voted(&self, slot: Slot) -> bool {
for vote in &self.vote_state.votes {
if slot == vote.slot {
return true;
}
}
false
}
pub fn is_locked_out(&self, slot: Slot, ancestors: &HashSet<Slot>) -> bool {
if !self.is_recent(slot) {
return true;
}
// Check if a slot is locked out by simulating adding a vote for that
// slot to the current lockouts to pop any expired votes. If any of the
// remaining voted slots are on a different fork from the checked slot,
// it's still locked out.
let mut vote_state = self.vote_state.clone();
vote_state.process_slot_vote_unchecked(slot);
for vote in &vote_state.votes {
if slot != vote.slot && !ancestors.contains(&vote.slot) {
return true;
}
}
if let Some(root_slot) = vote_state.root_slot {
if slot != root_slot {
                // This case should never happen, because bank_forks purges all
                // non-descendants of the root every time the root is set
assert!(
ancestors.contains(&root_slot),
"ancestors: {:?}, slot: {} root: {}",
ancestors,
slot,
root_slot
);
}
}
false
}
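    // Illustrative sketch (not part of the original source): suppose the tower
    // holds a vote for slot 4 with confirmation_count 1, so it is locked out
    // through slot 4 + 2^1 = 6. Checking slot 5 on a fork that does NOT descend
    // from 4 simulates a vote for 5; the vote for 4 has not expired (5 <= 6)
    // and 4 is not an ancestor of 5, so is_locked_out() returns true. Checking
    // slot 7 on that fork would first pop the expired vote for 4 (7 > 6), so,
    // assuming the remaining root is an ancestor of 7, it returns false.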
fn is_candidate_slot_descendant_of_last_vote(
candidate_slot: Slot,
last_voted_slot: Slot,
ancestors: &HashMap<Slot, HashSet<u64>>,
) -> Option<bool> {
ancestors
.get(&candidate_slot)
.map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&last_voted_slot))
}
#[allow(clippy::too_many_arguments)]
fn make_check_switch_threshold_decision(
&self,
switch_slot: Slot,
ancestors: &HashMap<Slot, HashSet<u64>>,
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
self.last_voted_slot_hash()
.map(|(last_voted_slot, last_voted_hash)| {
let root = self.root();
let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {
// This condition (stale stray last vote) shouldn't occur under normal validator
// operation, indicating something unusual happened.
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
// However, returning empty ancestors as a fallback here shouldn't result in
// slashing by itself (Note that we couldn't fully preclude any kind of slashing if
// the failure was OS or HW level).
// Firstly, lockout is ensured elsewhere.
                    // Also, there is no risk of optimistic conf. violation. Although empty ancestors
                    // could result in incorrect (= more than actual) locked_out_stake and a
                    // false-positive SwitchProof later in this function, there should be no such
                    // heavier fork candidate, first of all, if the last vote (or any of its
                    // unavailable ancestors) were already optimistically confirmed.
                    // The only exception is that another validator is already violating it...
if self.is_first_switch_check() && switch_slot < last_voted_slot {
                        // `switch_slot < last_voted_slot` is needed so that we don't warn! just
                        // because a newer snapshot was used on validator restart
let message = format!(
"bank_forks doesn't have corresponding data for the stray restored \
last vote({}), meaning some inconsistency between saved tower and ledger.",
last_voted_slot
);
warn!("{}", message);
datapoint_warn!("tower_warn", ("warn", message, String));
}
&empty_ancestors
};
let suspended_decision_due_to_major_unsynced_ledger = || {
// This peculiar corner handling is needed mainly for a tower which is newer than
                    // blockstore. (Yeah, we tolerate it for ease of validator maintenance by operators)
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
                    // When we're in this clause, it basically means the validator is running
                    // badly with a future tower while replaying past slots; especially
                    // problematic is last_voted_slot.
// So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
// there would be slashing because of double vote on one of last_vote_ancestors.
// (Well, needless to say, re-creating the duplicate block must be handled properly
// at the banking stage: https://github.com/solana-labs/solana/issues/8232)
//
// To be specific, the replay stage is tricked into a false perception where
// last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
// stray slots (which should always be empty_ancestors).
//
// This is covered by test_future_tower_* in local_cluster
SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
};
                let rollback_due_to_duplicate_ancestor = |latest_duplicate_ancestor| {
SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
};
// `heaviest_subtree_fork_choice` entries are not cleaned by duplicate block purging/rollback logic,
// so this is safe to check here. We return here if the last voted slot was rolled back/purged due to
                // being a duplicate because the `ancestors`/`descendants`/`progress` structures may be missing this slot due
// to duplicate purging. This would cause many of the `unwrap()` checks below to fail.
//
// TODO: Handle if the last vote is on a dupe, and then we restart. The dupe won't be in
// heaviest_subtree_fork_choice, so `heaviest_subtree_fork_choice.latest_invalid_ancestor()` will return
// None, but the last vote will be persisted in tower.
let switch_hash = progress.get_hash(switch_slot).expect("Slot we're trying to switch to must exist AND be frozen in progress map");
if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice.latest_invalid_ancestor(&(last_voted_slot, last_voted_hash)) {
// We're rolling back because one of the ancestors of the last vote was a duplicate. In this
                    // case, it's acceptable if the switch candidate is one of the ancestors of the previous vote;
                    // just fail the switch check because there's no point in voting on an ancestor. ReplayStage
                    // should then have a special case to continue building an alternate fork from this ancestor, NOT
                    // from the `last_voted_slot`. This is in contrast to the usual SwitchFailure, where ReplayStage continues
                    // to build blocks on the latest vote. See `ReplayStage::select_vote_and_reset_forks()` for more details.
if heaviest_subtree_fork_choice.is_strict_ancestor(&(switch_slot, switch_hash), &(last_voted_slot, last_voted_hash)) {
                        return rollback_due_to_duplicate_ancestor(latest_duplicate_ancestor);
} else if progress.get_hash(last_voted_slot).map(|current_slot_hash| current_slot_hash != last_voted_hash).unwrap_or(true) {
// Our last vote slot was purged because it was on a duplicate fork, don't continue below
// where checks may panic. We allow a freebie vote here that may violate switching
// thresholds
// TODO: Properly handle this case
info!("Allowing switch vote on {:?} because last vote {:?} was rolled back", (switch_slot, switch_hash), (last_voted_slot, last_voted_hash));
return SwitchForkDecision::SwitchProof(Hash::default());
}
}
let last_vote_ancestors =
ancestors.get(&last_voted_slot).unwrap_or_else(|| {
if self.is_stray_last_vote() {
// Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
                            // return Some(_), justifying the panic! here.
// Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
// if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
// touched in that case as well.
// In other words, except being stray, all other slots have been voted on while
// this validator has been running, so we must be able to fetch ancestors for
// all of them.
empty_ancestors_due_to_minor_unsynced_ledger()
} else {
panic!("no ancestors found with slot: {}", last_voted_slot);
}
});
let switch_slot_ancestors = ancestors.get(&switch_slot).unwrap();
if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
                    // If the `switch_slot` is a descendant of the last vote,
// no switching proof is necessary
return SwitchForkDecision::SameFork;
}
if last_vote_ancestors.contains(&switch_slot) {
if self.is_stray_last_vote() {
return suspended_decision_due_to_major_unsynced_ledger();
} else {
panic!(
"Should never consider switching to ancestor ({}) of last vote: {}, ancestors({:?})",
switch_slot,
last_voted_slot,
last_vote_ancestors,
);
}
}
// By this point, we know the `switch_slot` is on a different fork
// (is neither an ancestor nor descendant of `last_vote`), so a
// switching proof is necessary
let switch_proof = Hash::default();
let mut locked_out_stake = 0;
let mut locked_out_vote_accounts = HashSet::new();
for (candidate_slot, descendants) in descendants.iter() {
// 1) Don't consider any banks that haven't been frozen yet
// because the needed stats are unavailable
// 2) Only consider lockouts at the latest `frozen` bank
// on each fork, as that bank will contain all the
// lockout intervals for ancestors on that fork as well.
// 3) Don't consider lockouts on the `last_vote` itself
// 4) Don't consider lockouts on any descendants of
// `last_vote`
// 5) Don't consider any banks before the root because
// all lockouts must be ancestors of `last_vote`
if !progress.get_fork_stats(*candidate_slot).map(|stats| stats.computed).unwrap_or(false)
// If any of the descendants have the `computed` flag set, then there must be a more
// recent frozen bank on this fork to use, so we can ignore this one. Otherwise,
// even if this bank has descendants, if they have not yet been frozen / stats computed,
// then use this bank as a representative for the fork.
|| descendants.iter().any(|d| progress.get_fork_stats(*d).map(|stats| stats.computed).unwrap_or(false))
|| *candidate_slot == last_voted_slot
// Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
// want to count votes on the same fork.
|| Self::is_candidate_slot_descendant_of_last_vote(*candidate_slot, last_voted_slot, ancestors).expect("exists in descendants map, so must exist in ancestors map")
|| *candidate_slot <= root
{
continue;
}
                    // By the time we reach here, any ancestors of the `last_vote`
                    // should have been filtered out, as they all have a descendant,
                    // namely the `last_vote` itself.
assert!(!last_vote_ancestors.contains(candidate_slot));
// Evaluate which vote accounts in the bank are locked out
// in the interval candidate_slot..last_vote, which means
// finding any lockout intervals in the `lockout_intervals` tree
// for this bank that contain `last_vote`.
let lockout_intervals = &progress
.get(candidate_slot)
.unwrap()
.fork_stats
.lockout_intervals;
// Find any locked out intervals for vote accounts in this bank with
// `lockout_interval_end` >= `last_vote`, which implies they are locked out at
// `last_vote` on another fork.
for (_lockout_interval_end, intervals_keyed_by_end) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
if locked_out_vote_accounts.contains(vote_account_pubkey) {
continue;
}
// Only count lockouts on slots that are:
                            // 1) Not ancestors of `last_vote`, meaning they are on a different fork
// 2) Not from before the current root as we can't determine if
// anything before the root was an ancestor of `last_vote` or not
if !last_vote_ancestors.contains(lockout_interval_start)
// Given a `lockout_interval_start` < root that appears in a
// bank for a `candidate_slot`, it must be that `lockout_interval_start`
// is an ancestor of the current root, because `candidate_slot` is a
// descendant of the current root
&& *lockout_interval_start > root
{
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
locked_out_stake += stake;
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
return SwitchForkDecision::SwitchProof(switch_proof);
}
locked_out_vote_accounts.insert(vote_account_pubkey);
}
}
}
}
// Check the latest votes for potentially gossip votes that haven't landed yet
for (vote_account_pubkey, (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash)) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes() {
if locked_out_vote_accounts.contains(&vote_account_pubkey) {
continue;
}
if *candidate_latest_frozen_vote > last_voted_slot
&&
// Because `candidate_latest_frozen_vote` is the last vote made by some validator
// in the cluster for a frozen bank `B` observed through gossip, we may have cleared
// that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
// like so:
//
// |----------X ------candidate_latest_frozen_vote (frozen)
// old root
// |----------new root ----last_voted_slot
//
                    // In most cases, because `last_voted_slot` must be a descendant of `root`,
// if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
// directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
// because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
// should be safe to count towards the switching proof:
//
// However, there is also the possibility that `last_voted_slot` is a stray, in which
// case we cannot make this conclusion as we do not know the ancestors/descendants
// of strays. Hence we err on the side of caution here and ignore this vote. This
// is ok because validators voting on different unrooted forks should eventually vote
// on some descendant of the root, at which time they can be included in switching proofs.
!Self::is_candidate_slot_descendant_of_last_vote(
*candidate_latest_frozen_vote, last_voted_slot, ancestors)
.unwrap_or(true) {
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
locked_out_stake += stake;
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
return SwitchForkDecision::SwitchProof(switch_proof);
}
locked_out_vote_accounts.insert(vote_account_pubkey);
}
}
// We have not detected sufficient lockout past the last voted slot to generate
// a switching proof
SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
})
.unwrap_or(SwitchForkDecision::SameFork)
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn check_switch_threshold(
&mut self,
switch_slot: Slot,
ancestors: &HashMap<Slot, HashSet<u64>>,
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
let decision = self.make_check_switch_threshold_decision(
switch_slot,
ancestors,
descendants,
progress,
total_stake,
epoch_vote_accounts,
latest_validator_votes_for_frozen_banks,
heaviest_subtree_fork_choice,
);
let new_check = Some((switch_slot, decision.clone()));
if new_check != self.last_switch_threshold_check {
trace!(
"new switch threshold check: slot {}: {:?}",
switch_slot,
decision,
);
self.last_switch_threshold_check = new_check;
}
decision
}
fn is_first_switch_check(&self) -> bool {
self.last_switch_threshold_check.is_none()
}
pub fn check_vote_stake_threshold(
&self,
slot: Slot,
voted_stakes: &VotedStakes,
total_stake: Stake,
) -> bool {
let mut vote_state = self.vote_state.clone();
vote_state.process_slot_vote_unchecked(slot);
let vote = vote_state.nth_recent_vote(self.threshold_depth);
if let Some(vote) = vote {
if let Some(fork_stake) = voted_stakes.get(&vote.slot) {
let lockout = *fork_stake as f64 / total_stake as f64;
trace!(
"fork_stake slot: {}, vote slot: {}, lockout: {} fork_stake: {} total_stake: {}",
slot, vote.slot, lockout, fork_stake, total_stake
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.vote_state.votes {
if old_vote.slot == vote.slot
&& old_vote.confirmation_count == vote.confirmation_count
{
return true;
}
}
}
lockout > self.threshold_size
} else {
false
}
} else {
true
}
}
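    // Illustrative sketch (not part of the original source), assuming the
    // defaults threshold_depth = 8 and threshold_size = 2/3: a vote on slot S
    // passes the check above only if the vote sitting 8 entries deep in the
    // simulated tower has more than 2/3 of the total stake voting on its fork,
    // e.g. with total_stake = 90, more than 60 staked lamports.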
    /// Ensure a voted-stake entry exists for each vote slot and all of its ancestors
pub(crate) fn populate_ancestor_voted_stakes(
voted_stakes: &mut VotedStakes,
vote_slots: impl IntoIterator<Item = Slot>,
ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
        // If there are no ancestors, that means this slot must be from before the current root,
        // in which case the lockouts won't be calculated in bank_weight anyway, so ignore
        // this slot
for vote_slot in vote_slots {
if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
voted_stakes.entry(vote_slot).or_default();
for slot in slot_ancestors {
voted_stakes.entry(*slot).or_default();
}
}
}
}
/// Update stake for all the ancestors.
    /// Note that the stake is the same for all the ancestors.
fn update_ancestor_voted_stakes(
voted_stakes: &mut VotedStakes,
voted_slot: Slot,
voted_stake: u64,
ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
        // If there are no ancestors, that means this slot must be from
        // before the current root, so ignore this slot
if let Some(vote_slot_ancestors) = ancestors.get(&voted_slot) {
*voted_stakes.entry(voted_slot).or_default() += voted_stake;
for slot in vote_slot_ancestors {
*voted_stakes.entry(*slot).or_default() += voted_stake;
}
}
}
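    // Illustrative sketch (not part of the original source): if a validator
    // with 10 staked lamports voted on slot 5 and ancestors[5] = {4, 3, 2},
    // update_ancestor_voted_stakes() above adds 10 to the voted stake of each
    // of slots 5, 4, 3, and 2, since a vote for a slot is implicitly a vote
    // for its entire ancestry.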
fn voted_slots(&self) -> Vec<Slot> {
self.vote_state
.votes
.iter()
.map(|lockout| lockout.slot)
.collect()
}
pub fn is_stray_last_vote(&self) -> bool {
if let Some(last_voted_slot) = self.last_voted_slot() {
if let Some(stray_restored_slot) = self.stray_restored_slot {
return stray_restored_slot == last_voted_slot;
}
}
false
}
// The tower root can be older/newer if the validator booted from a newer/older snapshot, so
// tower lockouts may need adjustment
pub fn adjust_lockouts_after_replay(
mut self,
replayed_root: Slot,
slot_history: &SlotHistory,
) -> Result<Self> {
// sanity assertions for roots
let tower_root = self.root();
info!(
"adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
replayed_root,
self.voted_slots(),
tower_root,
replayed_root,
);
assert_eq!(slot_history.check(replayed_root), Check::Found);
assert!(
self.last_vote == Vote::default() && self.vote_state.votes.is_empty()
|| self.last_vote != Vote::default() && !self.vote_state.votes.is_empty(),
"last vote: {:?} vote_state.votes: {:?}",
self.last_vote,
self.vote_state.votes
);
if let Some(last_voted_slot) = self.last_voted_slot() {
if tower_root <= replayed_root {
                // Normally, we go into this clause, possibly with the help of
                // reconcile_blockstore_roots_with_tower()
if slot_history.check(last_voted_slot) == Check::TooOld {
// We could try hard to anchor with other older votes, but opt to simplify the
// following logic
return Err(TowerError::TooOldTower(
last_voted_slot,
slot_history.oldest(),
));
}
self.adjust_lockouts_with_slot_history(slot_history)?;
self.initialize_root(replayed_root);
} else {
// This should never occur under normal operation.
// While this validator's voting is suspended this way,
                // suspended_decision_due_to_major_unsynced_ledger() will also be touched.
let message = format!(
"For some reason, we're REPROCESSING slots which has already been \
voted and ROOTED by us; \
VOTING will be SUSPENDED UNTIL {}!",
last_voted_slot,
);
error!("{}", message);
datapoint_error!("tower_error", ("error", message, String));
// Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
// using a synthesized SlotHistory.
let mut warped_slot_history = (*slot_history).clone();
// Blockstore doesn't have the tower_root slot because of
// (replayed_root < tower_root) in this else clause, meaning the tower is from
// the future from the view of blockstore.
// Pretend the blockstore has the future tower_root to anchor exactly with that
// slot by adding tower_root to a slot history. The added slot will be newer
// than all slots in the slot history (remember tower_root > replayed_root),
// satisfying the slot history invariant.
// Thus, the whole process will be safe as well because tower_root exists
// within both tower and slot history, guaranteeing the success of adjustment
// and retaining all of future votes correctly while sanitizing.
warped_slot_history.add(tower_root);
self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
// don't update root; future tower's root should be kept across validator
// restarts to continue to show the scary messages at restarts until the next
// voting.
}
} else {
            // This else clause is for a newly created tower.
// initialize_lockouts_from_bank() should ensure the following invariant,
// otherwise we're screwing something up.
assert_eq!(tower_root, replayed_root);
}
Ok(self)
}
fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
let tower_root = self.root();
        // retained slots will consist only of divergent slots
let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
Vec::with_capacity(self.vote_state.votes.len());
let mut still_in_future = true;
let mut past_outside_history = false;
let mut checked_slot = None;
let mut anchored_slot = None;
let mut slots_in_tower = vec![tower_root];
slots_in_tower.extend(self.voted_slots());
// iterate over votes + root (if any) in the newest => oldest order
        // bail out early if a bad condition is found
for slot_in_tower in slots_in_tower.iter().rev() {
let check = slot_history.check(*slot_in_tower);
if anchored_slot.is_none() && check == Check::Found {
anchored_slot = Some(*slot_in_tower);
} else if anchored_slot.is_some() && check == Check::NotFound {
                // this can't happen unless we're fed a bogus snapshot
return Err(TowerError::FatallyInconsistent("diverged ancestor?"));
}
if still_in_future && check != Check::Future {
still_in_future = false;
} else if !still_in_future && check == Check::Future {
                // really odd cases: badly ordered votes?
return Err(TowerError::FatallyInconsistent("time warped?"));
}
if !past_outside_history && check == Check::TooOld {
past_outside_history = true;
} else if past_outside_history && check != Check::TooOld {
                // really odd cases: badly ordered votes?
return Err(TowerError::FatallyInconsistent(
"not too old once after got too old?",
));
}
if let Some(checked_slot) = checked_slot {
                // This is a special case: only if the tower is initialized and contains
                // a vote for the root can the root slot repeat, and only once
let voting_for_root =
*slot_in_tower == checked_slot && *slot_in_tower == tower_root;
if !voting_for_root {
                    // Unless we're voting since genesis, slots_in_tower must always be older
                    // than the last checked_slot, including all vote slots and the root slot.
assert!(
*slot_in_tower < checked_slot,
"slot_in_tower({}) < checked_slot({})",
*slot_in_tower,
checked_slot
);
}
}
checked_slot = Some(*slot_in_tower);
retain_flags_for_each_vote_in_reverse.push(anchored_slot.is_none());
}
// Check for errors if not anchored
info!("adjusted tower's anchored slot: {:?}", anchored_slot);
if anchored_slot.is_none() {
// this error really shouldn't happen unless ledger/tower is corrupted
return Err(TowerError::FatallyInconsistent(
"no common slot for rooted tower",
));
}
assert_eq!(
slots_in_tower.len(),
retain_flags_for_each_vote_in_reverse.len()
);
// pop for the tower root
retain_flags_for_each_vote_in_reverse.pop();
let mut retain_flags_for_each_vote =
retain_flags_for_each_vote_in_reverse.into_iter().rev();
let original_votes_len = self.vote_state.votes.len();
self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
if self.vote_state.votes.is_empty() {
info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
            // We might not have banks for those votes, so just reset;
            // the votes may well be past replayed_root.
self.last_vote = Vote::default();
} else {
info!(
"{} restored votes (out of {}) were on different fork or are upcoming votes on unrooted slots: {:?}!",
self.voted_slots().len(),
original_votes_len,
self.voted_slots()
);
assert_eq!(
self.last_vote.last_voted_slot().unwrap(),
*self.voted_slots().last().unwrap()
);
self.stray_restored_slot = Some(self.last_vote.last_voted_slot().unwrap());
}
Ok(())
}
fn initialize_lockouts_from_bank(
&mut self,
vote_account_pubkey: &Pubkey,
root: Slot,
bank: &Bank,
) {
if let Some((_stake, vote_account)) = bank.get_vote_account(vote_account_pubkey) {
self.vote_state = vote_account
.vote_state()
.as_ref()
.expect("vote_account isn't a VoteState?")
.clone();
self.initialize_root(root);
self.initialize_lockouts(|v| v.slot > root);
trace!(
"Lockouts in tower for {} is initialized using bank {}",
self.vote_state.node_pubkey,
bank.slot(),
);
} else {
self.initialize_root(root);
info!(
"vote account({}) not found in bank (slot={})",
vote_account_pubkey,
bank.slot()
);
}
}
fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
self.vote_state.votes.retain(should_retain);
}
    // Updating the root is needed to correctly restore from a newly-saved tower
    // for the next boot
fn initialize_root(&mut self, root: Slot) {
self.vote_state.root_slot = Some(root);
}
pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf {
path.join(format!("tower-{}", node_pubkey))
.with_extension("bin")
}
fn get_tmp_filename(path: &Path) -> PathBuf {
path.with_extension("bin.new")
}
pub fn save(&self, node_keypair: &Keypair) -> Result<()> {
let mut measure = Measure::start("tower_save-ms");
if self.node_pubkey != node_keypair.pubkey() {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_keypair.pubkey(),
self.node_pubkey
)));
}
let filename = &self.path;
let new_filename = &self.tmp_path;
{
            // overwrite anything that already exists
let mut file = File::create(&new_filename)?;
let saved_tower = SavedTower::new(self, node_keypair)?;
bincode::serialize_into(&mut file, &saved_tower)?;
// file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster!
}
trace!("persisted votes: {:?}", self.voted_slots());
fs::rename(&new_filename, &filename)?;
        // self.path.parent().sync_all() hurts performance, the same as the sync above
measure.stop();
inc_new_counter_info!("tower_save-ms", measure.as_ms() as usize);
Ok(())
}
pub fn restore(ledger_path: &Path, node_pubkey: &Pubkey) -> Result<Self> {
let filename = Self::get_filename(ledger_path, node_pubkey);
        // Ensure the parent dir is created here, because restore() always precedes save()
fs::create_dir_all(&filename.parent().unwrap())?;
let file = File::open(&filename)?;
let mut stream = BufReader::new(file);
let saved_tower: SavedTower = bincode::deserialize_from(&mut stream)?;
if !saved_tower.verify(node_pubkey) {
return Err(TowerError::InvalidSignature);
}
let mut tower = saved_tower.deserialize()?;
tower.ledger_path = ledger_path.into();
tower.path = filename;
tower.tmp_path = Self::get_tmp_filename(&tower.path);
// check that the tower actually belongs to this node
if &tower.node_pubkey != node_pubkey {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_pubkey, tower.node_pubkey
)));
}
Ok(tower)
}
}
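// Illustrative sketch (not part of the original source): a minimal, runnable
// check of the vote-diff behavior of apply_vote_and_generate_vote_diff(). The
// local tower already holds votes for slots 1 and 2; the bank last saw our
// vote for slot 2, so a new vote on slot 3 carries only the not-yet-landed
// slot.
#[test]
fn example_vote_diff_skips_landed_slots() {
    let mut local = VoteState::default();
    local.process_slot_vote_unchecked(1);
    local.process_slot_vote_unchecked(2);
    let diff = Tower::apply_vote_and_generate_vote_diff(&mut local, 3, Hash::default(), Some(2));
    assert_eq!(diff.slots, vec![3]);
}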
#[derive(Error, Debug)]
pub enum TowerError {
#[error("IO Error: {0}")]
IoError(#[from] std::io::Error),
#[error("Serialization Error: {0}")]
SerializeError(#[from] bincode::Error),
#[error("The signature on the saved tower is invalid")]
InvalidSignature,
#[error("The tower does not match this validator: {0}")]
WrongTower(String),
#[error(
"The tower is too old: \
newest slot in tower ({0}) << oldest slot in available history ({1})"
)]
TooOldTower(Slot, Slot),
#[error("The tower is fatally inconsistent with blockstore: {0}")]
FatallyInconsistent(&'static str),
#[error("The tower is useless because of new hard fork: {0}")]
HardFork(Slot),
}
impl TowerError {
pub fn is_file_missing(&self) -> bool {
if let TowerError::IoError(io_err) = &self {
io_err.kind() == std::io::ErrorKind::NotFound
} else {
false
}
}
}
#[frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp")]
#[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct SavedTower {
signature: Signature,
data: Vec<u8>,
}
impl SavedTower {
pub fn new<T: Signer>(tower: &Tower, keypair: &T) -> Result<Self> {
let data = bincode::serialize(tower)?;
let signature = keypair.sign_message(&data);
Ok(Self { signature, data })
}
pub fn verify(&self, pubkey: &Pubkey) -> bool {
self.signature.verify(pubkey.as_ref(), &self.data)
}
pub fn deserialize(&self) -> Result<Tower> {
bincode::deserialize(&self.data).map_err(|e| e.into())
}
}
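// Illustrative sketch (not part of the original source): the persistence round
// trip. SavedTower::new() signs the bincode-serialized tower with the node
// keypair, verify() checks that signature against the node pubkey, and
// deserialize() recovers an equal Tower.
#[test]
fn example_saved_tower_round_trip() {
    let keypair = Keypair::new();
    let tower = Tower::default();
    let saved = SavedTower::new(&tower, &keypair).unwrap();
    assert!(saved.verify(&keypair.pubkey()));
    assert_eq!(saved.deserialize().unwrap(), tower);
}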
// Given an untimely crash, tower may have roots that are not reflected in blockstore,
// or the reverse of this.
// That's because we don't impose any ordering guarantee or any kind of write barriers
// between tower (plain old POSIX fs calls) and blockstore (through RocksDB), when
// `ReplayStage::handle_votable_bank()` saves the tower before setting blockstore roots.
pub fn reconcile_blockstore_roots_with_tower(
tower: &Tower,
blockstore: &Blockstore,
) -> blockstore_db::Result<()> {
let tower_root = tower.root();
let last_blockstore_root = blockstore.last_root();
if last_blockstore_root < tower_root {
        // Ensure that tower_root itself exists and is marked as rooted in the
        // blockstore, in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true,
Ordering::Equal => false,
Ordering::Less => panic!(
"couldn't find a last_blockstore_root upwards from: {}!?",
tower_root
),
})
.collect();
if !new_roots.is_empty() {
info!(
"Reconciling slots as root based on tower root: {:?} ({}..{}) ",
new_roots, tower_root, last_blockstore_root
);
blockstore.set_roots(new_roots.iter())?;
} else {
            // This indicates we're in a bad state, but still don't panic here:
            // we might have a chance of recovering properly with a newer
            // snapshot.
warn!(
"Couldn't find any ancestor slots from tower root ({}) \
towards blockstore root ({}); blockstore pruned or only \
tower moved into new ledger?",
tower_root, last_blockstore_root,
);
}
}
Ok(())
}
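// Illustrative sketch (not part of the original source): if the tower was
// persisted with root 100 while a crash left the blockstore's last root at 95,
// reconcile_blockstore_roots_with_tower() walks the ancestor chain
// 100 -> 99 -> ... -> 96 and marks those slots rooted, so both stores agree on
// root 100 at the next boot.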
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
fork_choice::ForkChoice, heaviest_subtree_fork_choice::SlotHashKey,
replay_stage::HeaviestForkFailures, vote_simulator::VoteSimulator,
};
use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path};
use solana_runtime::bank::Bank;
use solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
clock::Slot,
hash::Hash,
pubkey::Pubkey,
signature::Signer,
slot_history::SlotHistory,
};
use solana_vote_program::vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY};
use std::{
collections::HashMap,
fs::{remove_file, OpenOptions},
io::{Read, Seek, SeekFrom, Write},
sync::Arc,
};
use tempfile::TempDir;
use trees::tr;
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
let mut stakes = vec![];
for (lamports, votes) in stake_votes {
let mut account = AccountSharedData::from(Account {
data: vec![0; VoteState::size_of()],
lamports: *lamports,
..Account::default()
});
let mut vote_state = VoteState::default();
for slot in *votes {
vote_state.process_slot_vote_unchecked(*slot);
}
VoteState::serialize(
&VoteStateVersions::new_current(vote_state),
&mut account.data_as_mut_slice(),
)
.expect("serialize state");
stakes.push((
solana_sdk::pubkey::new_rand(),
(*lamports, ArcVoteAccount::from(account)),
));
}
stakes
}
#[test]
fn test_to_vote_instruction() {
let vote = Vote::default();
let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none());
decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none());
decision = SwitchForkDecision::SameFork;
assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
Some(vote_instruction::vote(
&Pubkey::default(),
&Pubkey::default(),
vote.clone(),
))
);
decision = SwitchForkDecision::SwitchProof(Hash::default());
assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
Some(vote_instruction::vote_switch(
&Pubkey::default(),
&Pubkey::default(),
vote,
Hash::default()
))
);
}
#[test]
fn test_simple_votes() {
// Init state
let mut vote_simulator = VoteSimulator::new(1);
let node_pubkey = vote_simulator.node_pubkeys[0];
let mut tower = Tower::default();
// Create the tree of banks
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
// Set the voting behavior
let mut cluster_votes = HashMap::new();
let votes = vec![0, 1, 2, 3, 4, 5];
cluster_votes.insert(node_pubkey, votes.clone());
vote_simulator.fill_bank_forks(forks, &cluster_votes);
// Simulate the votes
for vote in votes {
assert!(vote_simulator
.simulate_vote(vote, &node_pubkey, &mut tower,)
.is_empty());
}
for i in 0..5 {
assert_eq!(tower.vote_state.votes[i].slot as usize, i);
assert_eq!(tower.vote_state.votes[i].confirmation_count as usize, 6 - i);
}
}
#[test]
fn test_switch_threshold_duplicate_rollback() {
run_test_switch_threshold_duplicate_rollback(false);
}
#[test]
#[should_panic]
fn test_switch_threshold_duplicate_rollback_panic() {
run_test_switch_threshold_duplicate_rollback(true);
}
fn setup_switch_test(num_accounts: usize) -> (Arc<Bank>, VoteSimulator, u64) {
// Init state
assert!(num_accounts > 1);
let mut vote_simulator = VoteSimulator::new(num_accounts);
let bank0 = vote_simulator
.bank_forks
.read()
.unwrap()
.get(0)
.unwrap()
.clone();
let total_stake = bank0.total_epoch_stake();
assert_eq!(
total_stake,
vote_simulator.validator_keypairs.len() as u64 * 10_000
);
// Create the tree of banks
let forks = tr(0)
/ (tr(1)
/ (tr(2)
// Minor fork 1
/ (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
/ (tr(43)
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110)))
/ tr(112))));
// Fill the BankForks according to the above fork structure
vote_simulator.fill_bank_forks(forks, &HashMap::new());
for (_, fork_progress) in vote_simulator.progress.iter_mut() {
fork_progress.fork_stats.computed = true;
}
(bank0, vote_simulator, total_stake)
}
fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) {
let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::default();
// Last vote is 47
tower.record_vote(
47,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(47)
.unwrap()
.hash(),
);
        // Trying to switch to an ancestor of the last vote should panic,
        // unless the last vote's fork contains a duplicate ancestor
let ancestor_of_voted_slot = 43;
let duplicate_ancestor1 = 44;
let duplicate_ancestor2 = 45;
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor1,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor1)
.unwrap()
.hash(),
));
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor2,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor2)
.unwrap()
.hash(),
));
assert_eq!(
tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);
let mut confirm_ancestors = vec![duplicate_ancestor1];
if should_panic {
// Adding the last duplicate ancestor will
// 1) Cause loop below to confirm last ancestor
// 2) Check switch threshold on a vote ancestor when there
// are no duplicates on that fork, which will cause a panic
confirm_ancestors.push(duplicate_ancestor2);
}
for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_valid_candidate(&(
duplicate_ancestor,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor)
.unwrap()
.hash(),
));
let res = tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
);
if i == 0 {
assert_eq!(
res,
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);
}
}
}
#[test]
fn test_switch_threshold() {
let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let mut descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::default();
let other_vote_account = vote_simulator.vote_pubkeys[1];
// Last vote is 47
tower.record_vote(47, Hash::default());
// Trying to switch to a descendant of last vote should always work
assert_eq!(
tower.check_switch_threshold(
48,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SameFork
);
// Trying to switch to another fork at 110 should fail
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a descendant of last vote should
// not count toward the switch threshold
vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on an ancestor of last vote should
// not count toward the switch threshold
vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a different fork, but the lockout
// doesn't cover the last vote, should not satisfy the switch threshold
vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
        // Adding another validator lockout on a different fork whose lockout
        // covers the last vote would normally count towards the switch threshold,
        // but not here, because the bank is not the most recent frozen bank on
        // its fork (14 is a frozen/computed bank > 13 on the same fork in this case)
vote_simulator.simulate_lockout_interval(13, (12, 47), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a different fork, and the lockout
// covers the last vote, should satisfy the switch threshold
vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
// Adding another unfrozen descendant of the tip of 14 should not remove
// slot 14 from consideration because it is still the most recent frozen
// bank on its fork
descendants.get_mut(&14).unwrap().insert(10000);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
// If we set a root, then any lockout intervals below the root shouldn't
// count toward the switch threshold. This means the other validator's
// vote lockout no longer counts
tower.vote_state.root_slot = Some(43);
// Refresh ancestors and descendants for new root.
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
}
#[test]
fn test_switch_threshold_use_gossip_votes() {
        let num_validators = 2;
        let (bank0, mut vote_simulator, total_stake) = setup_switch_test(num_validators as usize);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::default();
let other_vote_account = vote_simulator.vote_pubkeys[1];
// Last vote is 47
tower.record_vote(47, Hash::default());
// Trying to switch to another fork at 110 should fail
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
);
// Adding a vote on the descendant shouldn't count toward the switch threshold
vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding a later vote from gossip that isn't on the same fork should count toward the
// switch threshold
vote_simulator
.latest_validator_votes_for_frozen_banks
.check_add_vote(
other_vote_account,
112,
Some(
vote_simulator
.bank_forks
.read()
.unwrap()
.get(112)
.unwrap()
.hash(),
),
false,
);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
// If we now set a root that causes slot 112 to be purged from BankForks, then
// the switch proof will now fail since that validator's vote can no longer be
// included in the switching proof
vote_simulator.set_root(44);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
}
#[test]
fn test_switch_threshold_votes() {
// Init state
let mut vote_simulator = VoteSimulator::new(4);
let node_pubkey = vote_simulator.node_pubkeys[0];
let mut tower = Tower::default();
let forks = tr(0)
/ (tr(1)
/ (tr(2)
// Minor fork 1
/ (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
/ (tr(43)
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46))))
/ (tr(110)))));
        // Have two validators, each representing 20% of the stake, vote on
        // minor fork 2 at slots 46 and 47
let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
cluster_votes.insert(vote_simulator.node_pubkeys[1], vec![46]);
cluster_votes.insert(vote_simulator.node_pubkeys[2], vec![47]);
vote_simulator.fill_bank_forks(forks, &cluster_votes);
// Vote on the first minor fork at slot 14, should succeed
assert!(vote_simulator
.simulate_vote(14, &node_pubkey, &mut tower,)
.is_empty());
// The other two validators voted at slots 46, 47, which
// will only both show up in slot 48, at which point
// 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
// on another fork, so switching should succeed
let votes_to_simulate = (46..=48).collect();
let results = vote_simulator.create_and_vote_new_branch(
45,
48,
&cluster_votes,
&votes_to_simulate,
&node_pubkey,
&mut tower,
);
for slot in 46..=48 {
if slot == 48 {
assert!(results.get(&slot).unwrap().is_empty());
} else {
assert_eq!(
*results.get(&slot).unwrap(),
vec![HeaviestForkFailures::FailedSwitchThreshold(slot)]
);
}
}
}
#[test]
fn test_double_partition() {
// Init state
let mut vote_simulator = VoteSimulator::new(2);
let node_pubkey = vote_simulator.node_pubkeys[0];
let vote_pubkey = vote_simulator.vote_pubkeys[0];
let mut tower = Tower::default();
let num_slots_to_try = 200;
// Create the tree of banks
let forks = tr(0)
/ (tr(1)
/ (tr(2)
/ (tr(3)
/ (tr(4)
/ (tr(5)
/ (tr(6)
/ (tr(7)
/ (tr(8)
/ (tr(9)
// Minor fork 1
/ (tr(10) / (tr(11) / (tr(12) / (tr(13) / (tr(14))))))
/ (tr(43)
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
// Set the successful voting behavior
let mut cluster_votes = HashMap::new();
let mut my_votes: Vec<Slot> = vec![];
let next_unlocked_slot = 110;
// Vote on the first minor fork
my_votes.extend(0..=14);
// Come back to the main fork
my_votes.extend(43..=44);
// Vote on the second minor fork
my_votes.extend(45..=50);
// Vote to come back to main fork
my_votes.push(next_unlocked_slot);
cluster_votes.insert(node_pubkey, my_votes.clone());
        // Make the other validator vote on the same fork to pass the threshold checks
let other_votes = my_votes.clone();
cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
vote_simulator.fill_bank_forks(forks, &cluster_votes);
// Simulate the votes.
for vote in &my_votes {
// All these votes should be ok
assert!(vote_simulator
.simulate_vote(*vote, &node_pubkey, &mut tower,)
.is_empty());
}
info!("local tower: {:#?}", tower.vote_state.votes);
let observed = vote_simulator
.bank_forks
.read()
.unwrap()
.get(next_unlocked_slot)
.unwrap()
.get_vote_account(&vote_pubkey)
.unwrap();
let state = observed.1.vote_state();
info!("observed tower: {:#?}", state.as_ref().unwrap().votes);
let num_slots_to_try = 200;
cluster_votes
.get_mut(&vote_simulator.node_pubkeys[1])
.unwrap()
.extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
assert!(vote_simulator.can_progress_on_fork(
&node_pubkey,
&mut tower,
next_unlocked_slot,
num_slots_to_try,
&mut cluster_votes,
));
}
#[test]
fn test_collect_vote_lockouts_sums() {
        // Two accounts voting for slot 0 with 1 token staked
let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
accounts.sort_by_key(|(pk, _)| *pk);
let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
.iter()
.map(|(pubkey, _)| (*pubkey, (0, Hash::default())))
.collect();
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
.into_iter()
.collect();
let mut latest_validator_votes_for_frozen_banks =
LatestValidatorVotesForFrozenBanks::default();
let ComputedBankState {
voted_stakes,
total_stake,
bank_weight,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
1,
accounts.into_iter(),
&ancestors,
|_| Some(Hash::default()),
&mut latest_validator_votes_for_frozen_banks,
);
assert_eq!(voted_stakes[&0], 2);
assert_eq!(total_stake, 2);
let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0);
new_votes.sort();
assert_eq!(new_votes, account_latest_votes);
        // Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts,
        // the account will have 2 votes, with lockouts 2 + 4 = 6. So the expected
        // bank weight is 2 (accounts) * 6 = 12.
        assert_eq!(bank_weight, 12)
}
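    // Hedged illustration (added; not in the original source): the bank-weight
    // arithmetic above assumes each confirmation doubles a vote's lockout, i.e.
    // `Lockout::lockout()` returns 2^confirmation_count (hence 2 + 4 = 6 per account).
    #[test]
    fn test_lockout_doubles_per_confirmation() {
        // `Lockout::new` starts at confirmation_count 1, so lockout is 2^1 = 2
        let mut lockout = Lockout::new(0);
        assert_eq!(lockout.lockout(), 2);
        // One more confirmation doubles it: 2^2 = 4
        lockout.confirmation_count = 2;
        assert_eq!(lockout.lockout(), 4);
    }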
#[test]
fn test_collect_vote_lockouts_root() {
let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
        // Two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
accounts.sort_by_key(|(pk, _)| *pk);
let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts
.iter()
.map(|(pubkey, _)| {
(
*pubkey,
((MAX_LOCKOUT_HISTORY - 1) as Slot, Hash::default()),
)
})
.collect();
let mut tower = Tower::new_for_tests(0, 0.67);
let mut ancestors = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
tower.record_vote(i as u64, Hash::default());
ancestors.insert(i as u64, (0..i as u64).collect());
}
let root = Lockout {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: 0,
};
let root_weight = root.lockout() as u128;
let vote_account_expected_weight = tower
.vote_state
.votes
.iter()
.map(|v| v.lockout() as u128)
.sum::<u128>()
+ root_weight;
let expected_bank_weight = 2 * vote_account_expected_weight;
assert_eq!(tower.vote_state.root_slot, Some(0));
let mut latest_validator_votes_for_frozen_banks =
LatestValidatorVotesForFrozenBanks::default();
let ComputedBankState {
voted_stakes,
bank_weight,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
MAX_LOCKOUT_HISTORY as u64,
accounts.into_iter(),
&ancestors,
|_| Some(Hash::default()),
&mut latest_validator_votes_for_frozen_banks,
);
for i in 0..MAX_LOCKOUT_HISTORY {
assert_eq!(voted_stakes[&(i as u64)], 2);
}
// should be the sum of all the weights for root
assert_eq!(bank_weight, expected_bank_weight);
let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root.slot);
new_votes.sort();
assert_eq!(new_votes, account_latest_votes);
}
#[test]
fn test_check_vote_threshold_without_votes() {
let tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 1)].into_iter().collect();
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(4, 0.67);
let mut stakes = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
stakes.insert(i, 1);
tower.record_vote(i, Hash::default());
}
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 1)].into_iter().collect();
assert!(!tower.is_slot_confirmed(0, &stakes, 2));
}
#[test]
fn test_is_slot_confirmed_unknown_slot() {
let tower = Tower::new_for_tests(1, 0.67);
let stakes = HashMap::new();
assert!(!tower.is_slot_confirmed(0, &stakes, 2));
}
#[test]
fn test_is_slot_confirmed_pass() {
let tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 2)].into_iter().collect();
assert!(tower.is_slot_confirmed(0, &stakes, 2));
}
#[test]
fn test_is_locked_out_empty() {
let tower = Tower::new_for_tests(0, 0.67);
let ancestors = HashSet::new();
assert!(!tower.is_locked_out(0, &ancestors));
}
#[test]
fn test_is_locked_out_root_slot_child_pass() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.vote_state.root_slot = Some(0);
assert!(!tower.is_locked_out(1, &ancestors));
}
#[test]
fn test_is_locked_out_root_slot_sibling_fail() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.vote_state.root_slot = Some(0);
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(2, &ancestors));
}
#[test]
fn test_check_already_voted() {
let mut tower = Tower::new_for_tests(0, 0.67);
tower.record_vote(0, Hash::default());
assert!(tower.has_voted(0));
assert!(!tower.has_voted(1));
}
#[test]
fn test_check_recent_slot() {
let mut tower = Tower::new_for_tests(0, 0.67);
assert!(tower.is_recent(0));
assert!(tower.is_recent(32));
for i in 0..64 {
tower.record_vote(i, Hash::default());
}
assert!(!tower.is_recent(0));
assert!(!tower.is_recent(32));
assert!(!tower.is_recent(63));
assert!(tower.is_recent(65));
}
#[test]
fn test_is_locked_out_double_vote() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(0, &ancestors));
}
#[test]
fn test_is_locked_out_child() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.record_vote(0, Hash::default());
assert!(!tower.is_locked_out(1, &ancestors));
}
#[test]
fn test_is_locked_out_sibling() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(2, &ancestors));
}
#[test]
fn test_is_locked_out_last_vote_expired() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(!tower.is_locked_out(4, &ancestors));
tower.record_vote(4, Hash::default());
assert_eq!(tower.vote_state.votes[0].slot, 0);
assert_eq!(tower.vote_state.votes[0].confirmation_count, 2);
assert_eq!(tower.vote_state.votes[1].slot, 4);
assert_eq!(tower.vote_state.votes[1].confirmation_count, 1);
}
#[test]
fn test_check_vote_threshold_below_threshold() {
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 1)].into_iter().collect();
tower.record_vote(0, Hash::default());
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_above_threshold() {
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 2)].into_iter().collect();
tower.record_vote(0, Hash::default());
assert!(tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_above_threshold_after_pop() {
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 2)].into_iter().collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_above_threshold_no_stake() {
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = HashMap::new();
tower.record_vote(0, Hash::default());
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_lockouts_not_updated() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2,));
}
#[test]
fn test_stake_is_updated_for_entire_branch() {
let mut voted_stakes = HashMap::new();
let account = AccountSharedData::from(Account {
lamports: 1,
..Account::default()
});
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports(), &ancestors);
assert_eq!(voted_stakes[&0], 1);
assert_eq!(voted_stakes[&1], 1);
assert_eq!(voted_stakes[&2], 1);
}
#[test]
fn test_apply_vote_and_generate_vote_diff() {
let mut local = VoteState::default();
let vote = Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), None);
assert_eq!(local.votes.len(), 1);
assert_eq!(vote.slots, vec![0]);
assert_eq!(local.tower(), vec![0]);
}
#[test]
fn test_apply_vote_and_generate_vote_diff_dup_vote() {
let mut local = VoteState::default();
// If `latest_voted_slot_in_bank == Some(0)`, then we already have a vote for 0. Adding
// another vote for slot 0 should return an empty vote as the diff.
let vote =
Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), Some(0));
assert!(vote.slots.is_empty());
}
#[test]
fn test_apply_vote_and_generate_vote_diff_next_vote() {
let mut local = VoteState::default();
let vote = Vote {
slots: vec![0],
hash: Hash::default(),
timestamp: None,
};
local.process_vote_unchecked(&vote);
assert_eq!(local.votes.len(), 1);
let vote =
Tower::apply_vote_and_generate_vote_diff(&mut local, 1, Hash::default(), Some(0));
assert_eq!(vote.slots, vec![1]);
assert_eq!(local.tower(), vec![0, 1]);
}
#[test]
fn test_apply_vote_and_generate_vote_diff_next_after_expired_vote() {
let mut local = VoteState::default();
let vote = Vote {
slots: vec![0],
hash: Hash::default(),
timestamp: None,
};
local.process_vote_unchecked(&vote);
assert_eq!(local.votes.len(), 1);
        // The first vote expired, so it should be evicted from the tower. Thus even
        // with `latest_voted_slot_in_bank == Some(0)`, the first vote slot won't be
        // observable in any of the results.
let vote =
Tower::apply_vote_and_generate_vote_diff(&mut local, 3, Hash::default(), Some(0));
assert_eq!(vote.slots, vec![3]);
assert_eq!(local.tower(), vec![3]);
}
#[test]
fn test_check_vote_threshold_forks() {
// Create the ancestor relationships
let ancestors = (0..=(VOTE_THRESHOLD_DEPTH + 1) as u64)
.map(|slot| {
let slot_parents: HashSet<_> = (0..slot).collect();
(slot, slot_parents)
})
.collect();
// Create votes such that
// 1) 3/4 of the stake has voted on slot: VOTE_THRESHOLD_DEPTH - 2, lockout: 2
// 2) 1/4 of the stake has voted on slot: VOTE_THRESHOLD_DEPTH, lockout: 2^9
let total_stake = 4;
let threshold_size = 0.67;
let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
let accounts = gen_stakes(&[
(threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
(total_stake - threshold_stake, &tower_votes[..]),
]);
// Initialize tower
let mut tower = Tower::new_for_tests(VOTE_THRESHOLD_DEPTH, threshold_size);
        // CASE 1: Record the first VOTE_THRESHOLD_DEPTH tower votes for fork 2. We want
        // to evaluate a vote on slot VOTE_THRESHOLD_DEPTH. The vote at threshold depth
        // should be for slot 0, which is common to all account vote states, so we
        // should pass the threshold check
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
for vote in &tower_votes {
tower.record_vote(*vote, Hash::default());
}
let ComputedBankState {
voted_stakes,
total_stake,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
vote_to_evaluate,
accounts.clone().into_iter(),
&ancestors,
|_| None,
&mut LatestValidatorVotesForFrozenBanks::default(),
);
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,));
// CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
// will expire the vote in one of the vote accounts, so we should have insufficient
// stake to pass the threshold
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
let ComputedBankState {
voted_stakes,
total_stake,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
vote_to_evaluate,
accounts.into_iter(),
&ancestors,
|_| None,
&mut LatestValidatorVotesForFrozenBanks::default(),
);
assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,));
}
fn vote_and_check_recent(num_votes: usize) {
let mut tower = Tower::new_for_tests(1, 0.67);
let slots = if num_votes > 0 {
vec![num_votes as u64 - 1]
} else {
vec![]
};
let mut expected = Vote::new(slots, Hash::default());
for i in 0..num_votes {
tower.record_vote(i as u64, Hash::default());
}
expected.timestamp = tower.last_vote.timestamp;
assert_eq!(expected, tower.last_vote)
}
#[test]
fn test_recent_votes_full() {
vote_and_check_recent(MAX_LOCKOUT_HISTORY)
}
#[test]
fn test_recent_votes_empty() {
vote_and_check_recent(0)
}
#[test]
fn test_recent_votes_exact() {
vote_and_check_recent(5)
}
#[test]
fn test_maybe_timestamp() {
let mut tower = Tower::default();
assert!(tower.maybe_timestamp(0).is_some());
assert!(tower.maybe_timestamp(1).is_some());
assert!(tower.maybe_timestamp(0).is_none()); // Refuse to timestamp an older slot
assert!(tower.maybe_timestamp(1).is_none()); // Refuse to timestamp the same slot twice
tower.last_timestamp.timestamp -= 1; // Move last_timestamp into the past
assert!(tower.maybe_timestamp(2).is_some()); // slot 2 gets a timestamp
tower.last_timestamp.timestamp += 1_000_000; // Move last_timestamp well into the future
assert!(tower.maybe_timestamp(3).is_none()); // slot 3 gets no timestamp
}
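    // Hedged round-trip sketch (added; not in the original source): `SavedTower`
    // signs the bincode-serialized tower, verifies against the signer's pubkey,
    // and deserializes back. Assumes `Keypair` is in scope as in the snapshot
    // tests below.
    #[test]
    fn test_saved_tower_round_trip() {
        let keypair = Keypair::new();
        let mut tower = Tower::default();
        tower.record_vote(42, Hash::default());
        let saved = SavedTower::new(&tower, &keypair).unwrap();
        // The signature must check out against the signing keypair's pubkey...
        assert!(saved.verify(&keypair.pubkey()));
        // ...and deserializing must recover the recorded votes.
        assert_eq!(saved.deserialize().unwrap().voted_slots(), vec![42]);
    }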
fn run_test_load_tower_snapshot<F, G>(
modify_original: F,
modify_serialized: G,
) -> (Tower, Result<Tower>)
where
F: Fn(&mut Tower, &Pubkey),
G: Fn(&PathBuf),
{
let dir = TempDir::new().unwrap();
let identity_keypair = Arc::new(Keypair::new());
// Use values that will not match the default derived from BankForks
let mut tower = Tower::new_for_tests(10, 0.9);
tower.ledger_path = dir.path().to_path_buf();
tower.path = Tower::get_filename(&tower.ledger_path, &identity_keypair.pubkey());
tower.tmp_path = Tower::get_tmp_filename(&tower.path);
modify_original(&mut tower, &identity_keypair.pubkey());
tower.save(&identity_keypair).unwrap();
modify_serialized(&tower.path);
let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey());
(tower, loaded)
}
#[test]
fn test_switch_threshold_across_tower_reload() {
solana_logger::setup();
// Init state
let mut vote_simulator = VoteSimulator::new(2);
let other_vote_account = vote_simulator.vote_pubkeys[1];
let bank0 = vote_simulator
.bank_forks
.read()
.unwrap()
.get(0)
.unwrap()
.clone();
let total_stake = bank0.total_epoch_stake();
assert_eq!(
total_stake,
vote_simulator.validator_keypairs.len() as u64 * 10_000
);
// Create the tree of banks
let forks = tr(0)
/ (tr(1)
/ (tr(2)
/ tr(10)
/ (tr(43)
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110) / tr(111))))));
// Fill the BankForks according to the above fork structure
vote_simulator.fill_bank_forks(forks, &HashMap::new());
for (_, fork_progress) in vote_simulator.progress.iter_mut() {
fork_progress.fork_stats.computed = true;
}
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::default();
tower.record_vote(43, Hash::default());
tower.record_vote(44, Hash::default());
tower.record_vote(45, Hash::default());
tower.record_vote(46, Hash::default());
tower.record_vote(47, Hash::default());
tower.record_vote(48, Hash::default());
tower.record_vote(49, Hash::default());
// Trying to switch to a descendant of last vote should always work
assert_eq!(
tower.check_switch_threshold(
50,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SameFork
);
// Trying to switch to another fork at 110 should fail
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
assert_eq!(tower.voted_slots(), vec![43, 44, 45, 46, 47, 48, 49]);
{
let mut tower = tower.clone();
tower.record_vote(110, Hash::default());
tower.record_vote(111, Hash::default());
assert_eq!(tower.voted_slots(), vec![43, 110, 111]);
assert_eq!(tower.vote_state.root_slot, Some(0));
}
// Prepare simulated validator restart!
let mut vote_simulator = VoteSimulator::new(2);
let other_vote_account = vote_simulator.vote_pubkeys[1];
let bank0 = vote_simulator
.bank_forks
.read()
.unwrap()
.get(0)
.unwrap()
.clone();
let total_stake = bank0.total_epoch_stake();
let forks = tr(0)
/ (tr(1)
/ (tr(2)
/ tr(10)
/ (tr(43)
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110) / tr(111))))));
let replayed_root_slot = 44;
// Fill the BankForks according to the above fork structure
vote_simulator.fill_bank_forks(forks, &HashMap::new());
for (_, fork_progress) in vote_simulator.progress.iter_mut() {
fork_progress.fork_stats.computed = true;
}
        // pretend the tower was just restored after the validator restart!
let mut slot_history = SlotHistory::default();
vote_simulator.set_root(replayed_root_slot);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
for slot in &[0, 1, 2, 43, replayed_root_slot] {
slot_history.add(*slot);
}
let mut tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![45, 46, 47, 48, 49]);
// Trying to switch to another fork at 110 should fail
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Add lockout_interval which should be excluded
vote_simulator.simulate_lockout_interval(111, (45, 50), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Add lockout_interval which should not be excluded
vote_simulator.simulate_lockout_interval(111, (110, 200), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
tower.record_vote(110, Hash::default());
tower.record_vote(111, Hash::default());
assert_eq!(tower.voted_slots(), vec![110, 111]);
assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot));
}
#[test]
fn test_load_tower_ok() {
let (tower, loaded) =
run_test_load_tower_snapshot(|tower, pubkey| tower.node_pubkey = *pubkey, |_| ());
let loaded = loaded.unwrap();
assert_eq!(loaded, tower);
assert_eq!(tower.threshold_depth, 10);
assert!((tower.threshold_size - 0.9_f64).abs() < f64::EPSILON);
assert_eq!(loaded.threshold_depth, 10);
assert!((loaded.threshold_size - 0.9_f64).abs() < f64::EPSILON);
}
#[test]
fn test_load_tower_wrong_identity() {
let identity_keypair = Arc::new(Keypair::new());
let tower = Tower::default();
assert_matches!(
tower.save(&identity_keypair),
Err(TowerError::WrongTower(_))
)
}
#[test]
fn test_load_tower_invalid_signature() {
let (_, loaded) = run_test_load_tower_snapshot(
|tower, pubkey| tower.node_pubkey = *pubkey,
|path| {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.open(path)
.unwrap();
let mut buf = [0u8];
assert_eq!(file.read(&mut buf).unwrap(), 1);
buf[0] = !buf[0];
assert_eq!(file.seek(SeekFrom::Start(0)).unwrap(), 0);
assert_eq!(file.write(&buf).unwrap(), 1);
},
);
assert_matches!(loaded, Err(TowerError::InvalidSignature))
}
#[test]
fn test_load_tower_deser_failure() {
let (_, loaded) = run_test_load_tower_snapshot(
|tower, pubkey| tower.node_pubkey = *pubkey,
|path| {
OpenOptions::new()
.write(true)
.truncate(true)
.open(&path)
.unwrap_or_else(|_| panic!("Failed to truncate file: {:?}", path));
},
);
assert_matches!(loaded, Err(TowerError::SerializeError(_)))
}
#[test]
fn test_load_tower_missing() {
let (_, loaded) = run_test_load_tower_snapshot(
|tower, pubkey| tower.node_pubkey = *pubkey,
|path| {
remove_file(path).unwrap();
},
);
assert_matches!(loaded, Err(TowerError::IoError(_)))
}
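    // Hedged follow-up (added; not in the original source): the `IoError`
    // produced by a missing tower file is exactly what
    // `TowerError::is_file_missing` is meant to detect.
    #[test]
    fn test_load_tower_missing_is_file_missing() {
        let (_, loaded) = run_test_load_tower_snapshot(
            |tower, pubkey| tower.node_pubkey = *pubkey,
            |path| {
                remove_file(path).unwrap();
            },
        );
        assert!(loaded.unwrap_err().is_file_missing());
    }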
#[test]
fn test_reconcile_blockstore_roots_with_tower_normal() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(4, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(!blockstore.is_root(0));
assert!(!blockstore.is_root(1));
assert!(!blockstore.is_root(3));
assert!(!blockstore.is_root(4));
let mut tower = Tower::default();
tower.vote_state.root_slot = Some(4);
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
assert!(!blockstore.is_root(0));
assert!(blockstore.is_root(1));
assert!(!blockstore.is_root(3));
assert!(blockstore.is_root(4));
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
#[should_panic(expected = "couldn't find a last_blockstore_root upwards from: 4!?")]
fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(4, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(std::iter::once(&3)).unwrap();
assert!(!blockstore.is_root(0));
assert!(!blockstore.is_root(1));
assert!(blockstore.is_root(3));
assert!(!blockstore.is_root(4));
let mut tower = Tower::default();
tower.vote_state.root_slot = Some(4);
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(1, 0, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
let (shreds, _) = make_slot_entries(3, 1, 42);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(!blockstore.is_root(0));
assert!(!blockstore.is_root(1));
assert!(!blockstore.is_root(3));
let mut tower = Tower::default();
tower.vote_state.root_slot = Some(4);
assert_eq!(blockstore.last_root(), 0);
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
assert_eq!(blockstore.last_root(), 0);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_adjust_lockouts_after_replay_future_slots() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
tower.record_vote(3, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
let replayed_root_slot = 1;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), replayed_root_slot);
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_not_found_slots() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
tower.record_vote(3, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(4);
let replayed_root_slot = 4;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_all_rooted_with_no_too_old() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
slot_history.add(3);
slot_history.add(4);
slot_history.add(5);
let replayed_root_slot = 5;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), replayed_root_slot);
assert_eq!(tower.stray_restored_slot, None);
}
#[test]
fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
use solana_sdk::slot_history::MAX_ENTRIES;
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
slot_history.add(MAX_ENTRIES);
tower = tower
.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), MAX_ENTRIES);
}
#[test]
fn test_adjust_lockouts_after_replay_anchored_future_slots() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
tower.record_vote(3, Hash::default());
tower.record_vote(4, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
let replayed_root_slot = 2;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4]);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_all_not_found() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(5, Hash::default());
tower.record_vote(6, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
slot_history.add(7);
let replayed_root_slot = 7;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![5, 6]);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.root_slot = Some(4);
tower.record_vote(5, Hash::default());
tower.record_vote(6, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
slot_history.add(7);
let replayed_root_slot = 7;
let result = tower.adjust_lockouts_after_replay(replayed_root_slot, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is fatally inconsistent with blockstore: no common slot for rooted tower"
);
}
#[test]
fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.root_slot = Some(2);
tower.record_vote(3, Hash::default());
tower.record_vote(4, Hash::default());
tower.record_vote(5, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(1);
slot_history.add(2);
let replayed_root_slot = 2;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_empty() {
let mut tower = Tower::new_for_tests(10, 0.9);
let mut slot_history = SlotHistory::default();
slot_history.add(0);
let replayed_root_slot = 0;
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
fn test_adjust_lockouts_after_replay_too_old_tower() {
use solana_sdk::slot_history::MAX_ENTRIES;
let mut tower = Tower::new_for_tests(10, 0.9);
tower.record_vote(0, Hash::default());
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(MAX_ENTRIES);
let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is too old: newest slot in tower (0) << oldest slot in available history (1)"
);
}
#[test]
fn test_adjust_lockouts_after_replay_time_warped() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(1));
tower.vote_state.votes.push_back(Lockout::new(0));
let vote = Vote::new(vec![0], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
let result = tower.adjust_lockouts_after_replay(0, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is fatally inconsistent with blockstore: time warped?"
);
}
#[test]
fn test_adjust_lockouts_after_replay_diverged_ancestor() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(1));
tower.vote_state.votes.push_back(Lockout::new(2));
let vote = Vote::new(vec![2], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
let result = tower.adjust_lockouts_after_replay(2, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is fatally inconsistent with blockstore: diverged ancestor?"
);
}
#[test]
fn test_adjust_lockouts_after_replay_out_of_order() {
use solana_sdk::slot_history::MAX_ENTRIES;
let mut tower = Tower::new_for_tests(10, 0.9);
tower
.vote_state
.votes
.push_back(Lockout::new(MAX_ENTRIES - 1));
tower.vote_state.votes.push_back(Lockout::new(0));
tower.vote_state.votes.push_back(Lockout::new(1));
let vote = Vote::new(vec![1], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(MAX_ENTRIES);
let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is fatally inconsistent with blockstore: not too old once after got too old?"
);
}
#[test]
#[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
fn test_adjust_lockouts_after_replay_reversed_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(2));
tower.vote_state.votes.push_back(Lockout::new(1));
let vote = Vote::new(vec![1], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
#[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(2));
tower.vote_state.votes.push_back(Lockout::new(3));
tower.vote_state.votes.push_back(Lockout::new(3));
let vote = Vote::new(vec![3], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_root() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.root_slot = Some(42);
tower.vote_state.votes.push_back(Lockout::new(42));
tower.vote_state.votes.push_back(Lockout::new(43));
tower.vote_state.votes.push_back(Lockout::new(44));
let vote = Vote::new(vec![44], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_genesis() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(0));
let vote = Vote::new(vec![0], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
}
#[test]
fn test_adjust_lockouts_after_replay_future_tower() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.vote_state.votes.push_back(Lockout::new(13));
tower.vote_state.votes.push_back(Lockout::new(14));
let vote = Vote::new(vec![14], Hash::default());
tower.last_vote = vote;
tower.initialize_root(12);
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
let tower = tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
assert_eq!(tower.root(), 12);
assert_eq!(tower.voted_slots(), vec![13, 14]);
assert_eq!(tower.stray_restored_slot, Some(14));
}
}
| 39.568627 | 187 | 0.565977 |
ab145b677d939590343d1d1fa8bfd3bc56ed9441 | 12,214 | //! This module contains code for snapshotting a database chunk to Parquet
//! files in object storage.
use arrow_deps::{
arrow::record_batch::RecordBatch,
parquet::{self, arrow::ArrowWriter, file::writer::TryClone},
};
use data_types::partition_metadata::{Partition as PartitionMeta, Table};
use object_store::ObjectStore;
use query::PartitionChunk;
use std::io::{Cursor, Seek, SeekFrom, Write};
use std::sync::{Arc, Mutex};
use bytes::Bytes;
use snafu::{ResultExt, Snafu};
use tokio::sync::oneshot;
use tracing::{error, info};
use uuid::Uuid;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Partition error creating snapshot: {}", source))]
PartitionError {
source: Box<dyn std::error::Error + Send + Sync>,
},
#[snafu(display("Table position out of bounds: {}", position))]
TablePositionOutOfBounds { position: usize },
#[snafu(display("Error generating json response: {}", source))]
JsonGenerationError { source: serde_json::Error },
#[snafu(display("Error opening Parquet Writer: {}", source))]
OpeningParquetWriter {
source: parquet::errors::ParquetError,
},
#[snafu(display("Error writing Parquet to memory: {}", source))]
WritingParquetToMemory {
source: parquet::errors::ParquetError,
},
#[snafu(display("Error closing Parquet Writer: {}", source))]
ClosingParquetWriter {
source: parquet::errors::ParquetError,
},
#[snafu(display("Error writing to object store: {}", source))]
WritingToObjectStore { source: object_store::Error },
#[snafu(display("Stopped early"))]
StoppedEarly,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug)]
pub struct Snapshot<T>
where
T: Send + Sync + 'static + PartitionChunk,
{
pub id: Uuid,
pub partition_meta: PartitionMeta,
pub metadata_path: String,
pub data_path: String,
store: Arc<ObjectStore>,
partition: Arc<T>,
status: Mutex<Status>,
}
impl<T> Snapshot<T>
where
T: Send + Sync + 'static + PartitionChunk,
{
fn new(
partition_key: String,
metadata_path: String,
data_path: String,
store: Arc<ObjectStore>,
partition: Arc<T>,
tables: Vec<Table>,
) -> Self {
let table_states = vec![TableState::NotStarted; tables.len()];
let status = Status {
table_states,
..Default::default()
};
Self {
id: Uuid::new_v4(),
partition_meta: PartitionMeta {
key: partition_key,
tables,
},
metadata_path,
data_path,
store,
partition,
status: Mutex::new(status),
}
}
    // Returns the position and name of the next not-started table, marking it
    // as running.
fn next_table(&self) -> Option<(usize, &str)> {
let mut status = self.status.lock().expect("mutex poisoned");
status
.table_states
.iter()
.position(|s| s == &TableState::NotStarted)
.map(|pos| {
status.table_states[pos] = TableState::Running;
(pos, &*self.partition_meta.tables[pos].name)
})
}
fn mark_table_finished(&self, position: usize) {
let mut status = self.status.lock().expect("mutex poisoned");
if status.table_states.len() > position {
status.table_states[position] = TableState::Finished;
}
}
fn mark_meta_written(&self) {
let mut status = self.status.lock().expect("mutex poisoned");
status.meta_written = true;
}
pub fn finished(&self) -> bool {
let status = self.status.lock().expect("mutex poisoned");
status
.table_states
.iter()
.all(|state| matches!(state, TableState::Finished))
}
fn should_stop(&self) -> bool {
let status = self.status.lock().expect("mutex poisoned");
status.stop_on_next_update
}
async fn run(&self, notify: Option<oneshot::Sender<()>>) -> Result<()> {
while let Some((pos, table_name)) = self.next_table() {
let mut batches = Vec::new();
self.partition
.table_to_arrow(&mut batches, table_name, &[])
.map_err(|e| Box::new(e) as _)
.context(PartitionError)?;
let file_name = format!("{}/{}.parquet", &self.data_path, table_name);
self.write_batches(batches, &file_name).await?;
self.mark_table_finished(pos);
if self.should_stop() {
return StoppedEarly.fail();
}
}
let partition_meta_path =
format!("{}/{}.json", &self.metadata_path, &self.partition_meta.key);
let json_data = serde_json::to_vec(&self.partition_meta).context(JsonGenerationError)?;
let data = Bytes::from(json_data);
let len = data.len();
let stream_data = std::io::Result::Ok(data);
self.store
.put(
&partition_meta_path,
futures::stream::once(async move { stream_data }),
len,
)
.await
.context(WritingToObjectStore)?;
self.mark_meta_written();
if let Some(notify) = notify {
if let Err(e) = notify.send(()) {
error!("error sending notify: {:?}", e);
}
}
Ok(())
}
async fn write_batches(&self, batches: Vec<RecordBatch>, file_name: &str) -> Result<()> {
let mem_writer = MemWriter::default();
{
let mut writer = ArrowWriter::try_new(mem_writer.clone(), batches[0].schema(), None)
.context(OpeningParquetWriter)?;
for batch in batches.into_iter() {
writer.write(&batch).context(WritingParquetToMemory)?;
}
writer.close().context(ClosingParquetWriter)?;
        } // drop the reference to the MemWriter that the ArrowWriter holds
let data = mem_writer
.into_inner()
.expect("Nothing else should have a reference here");
let len = data.len();
let data = Bytes::from(data);
        // Use the io::Result alias explicitly (as in `run` above); the bare `Result`
        // here would resolve to this module's `Result<T, Error>` alias instead.
        let stream_data = std::io::Result::Ok(data);
self.store
.put(
                file_name,
futures::stream::once(async move { stream_data }),
len,
)
.await
.context(WritingToObjectStore)
}
fn set_error(&self, e: Error) {
let mut status = self.status.lock().expect("mutex poisoned");
status.error = Some(e);
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TableState {
NotStarted,
Running,
Finished,
}
#[derive(Debug, Default)]
pub struct Status {
table_states: Vec<TableState>,
meta_written: bool,
stop_on_next_update: bool,
error: Option<Error>,
}
pub fn snapshot_chunk<T>(
metadata_path: impl Into<String>,
data_path: impl Into<String>,
store: Arc<ObjectStore>,
partition: Arc<T>,
notify: Option<oneshot::Sender<()>>,
) -> Result<Arc<Snapshot<T>>>
where
T: Send + Sync + 'static + PartitionChunk,
{
let table_stats = partition
.table_stats()
.map_err(|e| Box::new(e) as _)
.context(PartitionError)?;
let snapshot = Snapshot::new(
partition.key().to_string(),
metadata_path.into(),
data_path.into(),
store,
partition,
table_stats,
);
let snapshot = Arc::new(snapshot);
let return_snapshot = snapshot.clone();
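    // The snapshot runs on a background task; failures are recorded on the
    // shared status via `set_error` rather than returned to the caller.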
tokio::spawn(async move {
info!(
"starting snapshot of {} to {}",
&snapshot.partition_meta.key, &snapshot.data_path
);
if let Err(e) = snapshot.run(notify).await {
error!("error running snapshot: {:?}", e);
snapshot.set_error(e);
}
});
Ok(return_snapshot)
}
#[derive(Debug, Default, Clone)]
struct MemWriter {
mem: Arc<Mutex<Cursor<Vec<u8>>>>,
}
impl MemWriter {
/// Returns the inner buffer as long as there are no other references to the
/// Arc.
pub fn into_inner(self) -> Option<Vec<u8>> {
Arc::try_unwrap(self.mem)
.ok()
.and_then(|mutex| mutex.into_inner().ok())
.map(|cursor| cursor.into_inner())
}
}
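// A minimal sketch of the `into_inner` ownership contract: the buffer is only
// recovered once the last clone of the shared Arc is gone.
#[cfg(test)]
mod mem_writer_ownership_tests {
    use super::*;

    #[test]
    fn into_inner_requires_sole_ownership() {
        let writer = MemWriter::default();
        let clone = writer.clone();
        // A second Arc reference is still alive, so unwrapping fails...
        assert!(writer.into_inner().is_none());
        // ...and succeeds once the remaining handle is the sole owner.
        assert!(clone.into_inner().is_some());
    }
}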
impl Write for MemWriter {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let mut inner = self.mem.lock().unwrap();
inner.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
let mut inner = self.mem.lock().unwrap();
inner.flush()
}
}
impl Seek for MemWriter {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
let mut inner = self.mem.lock().unwrap();
inner.seek(pos)
}
}
impl TryClone for MemWriter {
fn try_clone(&self) -> std::io::Result<Self> {
Ok(Self {
mem: self.mem.clone(),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use data_types::data::lines_to_replicated_write;
use data_types::database_rules::DatabaseRules;
use futures::TryStreamExt;
use influxdb_line_protocol::parse_lines;
use mutable_buffer::chunk::Chunk as ChunkWB;
use object_store::InMemory;
#[tokio::test]
async fn snapshot() {
let lp = r#"
cpu,host=A,region=west user=23.2,system=55.1 1
cpu,host=A,region=west user=3.2,system=50.1 10
cpu,host=B,region=east user=10.0,system=74.1 1
mem,host=A,region=west used=45 1
"#;
let lines: Vec<_> = parse_lines(lp).map(|l| l.unwrap()).collect();
let write = lines_to_replicated_write(1, 1, &lines, &DatabaseRules::default());
let mut chunk = ChunkWB::new("testaroo", 11);
for e in write.write_buffer_batch().unwrap().entries().unwrap() {
chunk.write_entry(&e).unwrap();
}
let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
let chunk = Arc::new(chunk);
let (tx, rx) = tokio::sync::oneshot::channel();
let metadata_path = "/meta";
let data_path = "/data";
let snapshot = snapshot_chunk(
metadata_path,
data_path,
store.clone(),
chunk.clone(),
Some(tx),
)
.unwrap();
rx.await.unwrap();
let summary = store
.get("/meta/testaroo.json")
.await
.unwrap()
.map_ok(|b| bytes::BytesMut::from(&b[..]))
.try_concat()
.await
.unwrap();
let meta: PartitionMeta = serde_json::from_slice(&*summary).unwrap();
assert_eq!(meta, snapshot.partition_meta);
}
#[test]
fn snapshot_states() {
let tables = vec![
Table {
name: "foo".to_string(),
columns: vec![],
},
Table {
name: "bar".to_string(),
columns: vec![],
},
Table {
name: "asdf".to_string(),
columns: vec![],
},
];
let store = Arc::new(ObjectStore::new_in_memory(InMemory::new()));
let chunk = Arc::new(ChunkWB::new("testaroo", 11));
let metadata_path = "/meta".to_string();
let data_path = "/data".to_string();
let snapshot = Snapshot::new(
chunk.key.clone(),
metadata_path,
data_path,
store,
chunk,
tables,
);
let (pos, name) = snapshot.next_table().unwrap();
assert_eq!(0, pos);
assert_eq!("foo", name);
let (pos, name) = snapshot.next_table().unwrap();
assert_eq!(1, pos);
assert_eq!("bar", name);
snapshot.mark_table_finished(1);
assert!(!snapshot.finished());
let (pos, name) = snapshot.next_table().unwrap();
assert_eq!(2, pos);
assert_eq!("asdf", name);
assert!(snapshot.next_table().is_none());
assert!(!snapshot.finished());
snapshot.mark_table_finished(0);
snapshot.mark_table_finished(2);
assert!(snapshot.finished());
}
}
| 28.207852 | 96 | 0.558785 |
e9848960180961df381341cfd2f92bd971fac9b4 | 900 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(non_camel_case_types)]
enum noption<T> { some(T) }
use noption::some;
struct Pair { x: isize, y: isize }
pub fn main() {
    let nop: noption<isize> = some::<isize>(5);
    match nop { some(n) => { println!("{:?}", n); assert_eq!(n, 5); } }
    let nop2: noption<Pair> = some(Pair { x: 17, y: 42 });
match nop2 {
some(t) => {
println!("{:?}", t.x);
println!("{:?}", t.y);
assert_eq!(t.x, 17);
assert_eq!(t.y, 42);
}
}
}
| 29.032258 | 79 | 0.597778 |
9b951874e5ed6852bbe07566f93dbbdae53a31f5 | 6,113 | #![cfg(feature = "test_utils")]
use hdk::prelude::Links;
use holochain::sweettest::SweetAgents;
use holochain::sweettest::SweetConductor;
use holochain::sweettest::SweetDnaFile;
use holochain::test_utils::consistency_10s;
use holochain_serialized_bytes::prelude::*;
use holochain_types::prelude::*;
use holochain_wasm_test_utils::TestWasm;
#[derive(serde::Serialize, serde::Deserialize, Debug, SerializedBytes, derive_more::From)]
struct BaseTarget(EntryHash, EntryHash);
fn links_zome() -> InlineZome {
InlineZome::new_unique(vec![])
.callback("create_link", move |api, base_target: BaseTarget| {
let hash = api.create_link(CreateLinkInput::new(
base_target.0,
base_target.1,
().into(),
))?;
Ok(hash)
})
.callback("get_links", move |api, base: EntryHash| {
Ok(api.get_links(GetLinksInput::new(base, None))?)
})
}
/// A single link with an AgentPubKey for the base and target is committed by
/// one agent, and after a delay, all agents can get the link
#[tokio::test(flavor = "multi_thread")]
#[cfg(feature = "slow_tests")]
async fn many_agents_can_reach_consistency_agent_links() {
observability::test_run().ok();
const NUM_AGENTS: usize = 20;
let (dna_file, _) = SweetDnaFile::unique_from_inline_zome("links", links_zome())
.await
.unwrap();
// Create a Conductor
let mut conductor = SweetConductor::from_config(Default::default()).await;
let agents = SweetAgents::get(conductor.keystore(), NUM_AGENTS).await;
let apps = conductor
.setup_app_for_agents("app", &agents, &[dna_file])
.await
.unwrap();
let cells = apps.cells_flattened();
let alice = cells[0].zome("links");
// Must have integrated or be able to get the agent key to link from it
consistency_10s(&cells[..]).await;
let base: EntryHash = cells[0].agent_pubkey().clone().into();
let target: EntryHash = cells[1].agent_pubkey().clone().into();
let _: HeaderHash = conductor
.call(
&alice,
"create_link",
BaseTarget(base.clone(), target.clone()),
)
.await;
consistency_10s(&cells[..]).await;
let mut seen = [0usize; NUM_AGENTS];
for (i, cell) in cells.iter().enumerate() {
// let links: Links = conductor.call(&cell.zome(TestWasm::Link), "get_links", ()).await;
let links: Links = conductor
.call(&cell.zome("links"), "get_links", base.clone())
.await;
seen[i] = links.into_inner().len();
}
assert_eq!(seen.to_vec(), [1; NUM_AGENTS].to_vec());
}
/// A single link with a Path for the base and target is committed by one
/// agent, and after a delay, all agents can get the link
#[tokio::test(flavor = "multi_thread")]
#[cfg(feature = "slow_tests")]
async fn many_agents_can_reach_consistency_normal_links() {
observability::test_run().ok();
const NUM_AGENTS: usize = 30;
let (dna_file, _) = SweetDnaFile::unique_from_test_wasms(vec![TestWasm::Link])
.await
.unwrap();
// Create a Conductor
let mut conductor = SweetConductor::from_config(Default::default()).await;
let agents = SweetAgents::get(conductor.keystore(), NUM_AGENTS).await;
let apps = conductor
.setup_app_for_agents("app", &agents, &[dna_file])
.await
.unwrap();
let cells = apps.cells_flattened();
let alice = cells[0].zome(TestWasm::Link);
let _: HeaderHash = conductor.call(&alice, "create_link", ()).await;
consistency_10s(&cells[..]).await;
let mut num_seen = 0;
for cell in &cells {
let links: Links = conductor
.call(&cell.zome(TestWasm::Link), "get_links", ())
.await;
num_seen += links.into_inner().len();
}
assert_eq!(num_seen, NUM_AGENTS);
}
#[tokio::test(flavor = "multi_thread")]
#[cfg(feature = "test_utils")]
#[ignore = "Slow test for CI that is only useful for timing"]
async fn stuck_conductor_wasm_calls() -> anyhow::Result<()> {
observability::test_run().ok();
// Bundle the single zome into a DnaFile
let (dna_file, _) = SweetDnaFile::unique_from_test_wasms(vec![TestWasm::MultipleCalls]).await?;
// Create a Conductor
let mut conductor = SweetConductor::from_standard_config().await;
// Install DNA and install and activate apps in conductor
let alice = conductor
.setup_app("app", &[dna_file])
.await
.unwrap()
.into_cells()
.into_iter()
.next()
.unwrap();
let alice = alice.zome(TestWasm::MultipleCalls);
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerializedBytes)]
pub struct TwoInt(pub u32, pub u32);
// Make init run to avoid head moved errors
let _: () = conductor.call(&alice, "slow_fn", TwoInt(0, 0)).await;
let all_now = std::time::Instant::now();
tracing::debug!("starting slow fn");
// NB: there's currently no reason to independently create a bunch of tasks here,
// since they are all running in series. Hence, there is no reason to put the SweetConductor
// in an Arc. However, maybe this test was written to make it easy to try running some
// or all of the closures concurrently, in which case the Arc is indeed necessary.
let conductor_arc = std::sync::Arc::new(conductor);
let mut handles = Vec::new();
for i in 0..1000 {
let h = tokio::task::spawn({
let alice = alice.clone();
let conductor = conductor_arc.clone();
async move {
let now = std::time::Instant::now();
tracing::debug!("starting slow fn {}", i);
let _: () = conductor.call(&alice, "slow_fn", TwoInt(i, 5)).await;
tracing::debug!("finished slow fn {} in {}", i, now.elapsed().as_secs());
}
});
handles.push(h);
}
for h in handles {
h.await.unwrap();
}
tracing::debug!("finished all slow fn in {}", all_now.elapsed().as_secs());
Ok(())
}
| 33.961111 | 99 | 0.623753 |
f50d58058f928ce1b6d43bf954e37d6f710d67f2 | 357 | #[cfg(not(feature = "compile-with-external-structures"))]
mod native;
#[cfg(not(feature = "compile-with-external-structures"))]
pub use native::MagicCommentKind;
#[cfg(feature = "compile-with-external-structures")]
mod external;
#[cfg(feature = "compile-with-external-structures")]
pub use external::MagicCommentKind;
mod shared;
#[cfg(test)]
mod tests;
| 23.8 | 57 | 0.739496 |
f91cd1ecc863bd3b1d8d75d278e0eafc885d7d23 | 3,511 | // Grid implemented as flat vector
pub struct Grid {
num_rows: usize,
num_cols: usize,
elems: Vec<usize>,
}
impl Grid {
/// Returns a Grid of the specified size, with all elements pre-initialized to zero.
pub fn new(num_rows: usize, num_cols: usize) -> Grid {
        Grid {
            num_rows,
            num_cols,
            // The vec! macro creates a vector of zeros with the given length.
            // https://stackoverflow.com/a/29530932
            elems: vec![0; num_rows * num_cols],
        }
}
pub fn size(&self) -> (usize, usize) {
(self.num_rows, self.num_cols)
}
/// Returns the element at the specified location. If the location is out of bounds, returns
/// None.
///
    /// Note to students: this function could also have returned Result. It's a matter of taste in
    /// how you define the semantics; many languages raise exceptions for out-of-bounds accesses,
/// but others argue that makes code needlessly complex. Here, we decided to return Option to
/// give you more practice with Option :) and because this similar library returns Option:
/// https://docs.rs/array2d/0.2.1/array2d/struct.Array2D.html
pub fn get(&self, row: usize, col: usize) -> Option<usize> {
if row >= self.num_rows || col >= self.num_cols {
None
} else {
Some(self.elems[row * self.num_cols + col])
}
}
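    // Worked example of the flat layout: in a 4x3 grid, (row, col) = (2, 1)
    // maps to elems[2 * 3 + 1] = elems[7].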
/// Sets the element at the specified location to the specified value. If the location is out
/// of bounds, returns Err with an error message.
pub fn set(&mut self, row: usize, col: usize, val: usize) -> Result<(), &'static str> {
if row >= self.num_rows || col >= self.num_cols {
Err("location is out of bounds")
} else {
self.elems[row * self.num_cols + col] = val;
Ok(())
}
}
/// Prints a visual representation of the grid. You can use this for debugging.
pub fn display(&self) {
for row in 0..self.num_rows {
let mut line = String::new();
for col in 0..self.num_cols {
line.push_str(&format!("{}, ", self.get(row, col).unwrap()));
}
println!("{}", line);
}
}
/// Resets all the elements to zero.
pub fn clear(&mut self) {
for i in self.elems.iter_mut() {
*i = 0;
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_grid() {
let n_rows = 4;
let n_cols = 3;
let mut grid = Grid::new(n_rows, n_cols);
// Initialize grid
for r in 0..n_rows {
for c in 0..n_cols {
assert!(
grid.set(r, c, r * n_cols + c).is_ok(),
"Grid::set returned Err even though the provided bounds are valid!"
);
}
}
// Note: you need to run "cargo test -- --nocapture" in order to see output printed
println!("Grid contents:");
grid.display();
// Make sure the values are what we expect
for r in 0..n_rows {
for c in 0..n_cols {
assert!(
grid.get(r, c).is_some(),
"Grid::get returned None even though the provided bounds are valid!"
);
assert_eq!(grid.get(r, c).unwrap(), r * n_cols + c);
}
}
}
}
| 32.813084 | 99 | 0.540017 |
87e340770cf3989a66156300fb8d531ad0185a37 | 3,704 | pub mod types;
pub mod quad_tree;
use types::Point;
use quad_tree::QuadTree;
fn main() {
let a: f64 = 137.12345678;
let b = a * 1000000.0;
println!("{}", b);
println!("{}", b.floor());
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test1() {
let points = vec![
Point::new(0, "a".to_string(), 100.0, 100.0),
Point::new(1, "b".to_string(), 1.0, 1.0),
Point::new(2, "c".to_string(), 50.0, 40.0),
Point::new(3, "c".to_string(), 55.0, 45.0),
Point::new(4, "d".to_string(), 60.0, 70.0),
];
let target = Point::new(100, "target".to_string(), 61.0, 50.0);
let mut qt = QuadTree::new(points);
qt.init();
let p = qt.convert_point_to_no(&target);
assert_eq!(vec![3], qt.get_points_in_area(p.unwrap()).unwrap());
let ans = &Point::new(3, "c".to_string(), 55.0, 45.0);
let nearest = qt.get_nearest(&target);
assert_eq!(Some(ans), nearest);
let ans2 = Point::new(2, "c".to_string(), 50.0, 40.0);
let nearest2 = qt.get_nearest(&ans2);
assert_eq!(Some(&ans2), nearest2);
}
#[test]
fn test2() {
let points = vec![
Point::new(0, "a".to_string(), 100.0, 100.0),
Point::new(1, "b".to_string(), 1.0, 1.0),
Point::new(2, "c".to_string(), 50.0, 40.0),
Point::new(3, "c".to_string(), 51.0, 41.0),
Point::new(4, "c".to_string(), 52.0, 43.0),
Point::new(5, "d".to_string(), 60.0, 70.0),
Point::new(100, "target".to_string(), 53.0, 44.0)
];
let target = Point::new(4, "c".to_string(), 52.0, 42.5);
let mut qt = QuadTree::new(points);
qt.init();
let p = qt.convert_point_to_no(&target);
println!("{:?}", p);
println!("{:?}", qt.get_area(p.unwrap()));
        let mut v = qt.get_points_in_area(p.unwrap()).unwrap().iter().copied().collect::<Vec<usize>>();
v.sort();
assert_eq!(vec![3, 4, 6], v);
let ans = &Point::new(4, "c".to_string(), 52.0, 43.0);
let target2 = &Point::new(4, "c".to_string(), 49.5, 44.0);
let nearest = qt.get_nearest(&target2);
assert_eq!(Some(ans), nearest);
}
#[test]
fn test3() {
let mut points = Vec::new();
for i in 0..10000 {
let p = Point::new(i, "test".to_string(), (i + 1) as f64, (i + 1) as f64);
points.push(p);
}
let mut qt = QuadTree::new(points.clone());
qt.init();
for point in points {
let p_calc = qt.convert_point_to_no(&point);
let p_normal = qt.get_point(&point);
if p_normal.is_some() {
assert_eq!(p_normal, p_calc);
} else {
println!("{:?}", p_calc);
println!("{:?}", point);
println!("{:?}", qt.get_area(p_calc.unwrap()));
}
}
}
#[test]
fn test4() {
let mut points = Vec::new();
for i in 0..10000 {
let p2 = Point::new(i, "test".to_string(), (i * 100) as f64, (i * 100) as f64);
points.push(p2);
}
let mut qt = QuadTree::new(points.clone());
qt.init();
for point in points {
let p_calc = qt.convert_point_to_no(&point);
let p_normal = qt.get_point(&point);
if p_normal.is_some() {
assert_eq!(p_normal, p_calc);
} else {
println!("{:?}", p_calc);
println!("{:?}", point);
println!("{:?}", qt.get_area(p_calc.unwrap()));
}
}
}
} | 31.12605 | 108 | 0.473812 |
560439369b41bb990277d23e590fc1e5c3993f88 | 360 | use std::collections::HashMap;
fn main() {
let mut map = HashMap::new();
map.insert(10, String::from("a"));
map.insert(20, String::from("b"));
map.insert(30, String::from("c"));
map.insert(40, String::from("d"));
println!("{:?}",map);
for (key,value) in map{
println!("key is: {}, value is: {}",key,value);
}
}
| 16.363636 | 55 | 0.527778 |
33b7320427f36e95d9737232fe0ced143d7a3c5d | 24,477 | // Copyright 2020 WHTCORPS INC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::collections::HashMap;
use std::cell::RefCell;
use itertools::diff_with;
use symbols;
use types::Value;
/// A trait defining TuringString matching rules for any given TuringString of type `T`.
trait TuringStringMatchingRules<'a, T> {
/// Return true if the given TuringString matches an arbitrary value.
fn matches_any(TuringString: &T) -> bool;
/// Return the placeholder name if the given TuringString matches a placeholder.
fn matches_placeholder(TuringString: &'a T) -> Option<(&'a String)>;
}
/// A default type implementing `TuringStringMatchingRules` specialized on
/// EDBN values using plain symbols as TuringStrings. These TuringStrings are:
/// * `_` matches arbitrary sub-EDBN;
/// * `?name` matches sub-EDBN, which must be identical each place `?name` appears;
struct DefaultTuringStringMatchingRules;
impl<'a> TuringStringMatchingRules<'a, Value> for DefaultTuringStringMatchingRules {
fn matches_any(TuringString: &Value) -> bool {
match *TuringString {
Value::PlainSymbol(symbols::PlainSymbol(ref s)) => s.starts_with('_'),
_ => false
}
}
fn matches_placeholder(TuringString: &'a Value) -> Option<(&'a String)> {
match *TuringString {
Value::PlainSymbol(symbols::PlainSymbol(ref s)) => if s.starts_with('?') { Some(s) } else { None },
_ => None
}
}
}
/// TuringString matcher for EDBN values utilizing specified TuringString matching rules.
/// For example, using this with `DefaultTuringStringMatchingRules`:
/// * `[_]` matches an arbitrary one-element vector;
/// * `[_ _]` matches an arbitrary two-element vector;
/// * `[?x ?x]` matches `[1 1]` and `[#{} #{}]` but not `[1 2]` or `[[] #{}]`;
struct Matcher<'a> {
placeholders: RefCell<HashMap<&'a String, &'a Value>>
}
impl<'a> Matcher<'a> {
/// Creates a Matcher instance.
fn new() -> Matcher<'a> {
Matcher {
placeholders: RefCell::default()
}
}
/// Performs TuringString matching between two EDBN `Value` instances (`value`
/// and `TuringString`) utilizing a specified TuringString matching ruleset `T`.
/// Returns true if matching succeeds.
fn match_with_rules<T>(value: &'a Value, TuringString: &'a Value) -> bool
where T: TuringStringMatchingRules<'a, Value> {
let matcher = Matcher::new();
matcher.match_internal::<T>(value, TuringString)
}
/// Recursively traverses two EDBN `Value` instances (`value` and `TuringString`)
    /// performing TuringString matching. Note that the internal `placeholders`
    /// cache might not be empty on invocation.
fn match_internal<T>(&self, value: &'a Value, TuringString: &'a Value) -> bool
where T: TuringStringMatchingRules<'a, Value> {
use Value::*;
if T::matches_any(TuringString) {
true
} else if let Some(symbol) = T::matches_placeholder(TuringString) {
let mut placeholders = self.placeholders.borrow_mut();
value == *placeholders.entry(symbol).or_insert(value)
} else {
match (value, TuringString) {
(&Vector(ref v), &Vector(ref p)) =>
diff_with(v, p, |a, b| self.match_internal::<T>(a, b)).is_none(),
(&List(ref v), &List(ref p)) =>
diff_with(v, p, |a, b| self.match_internal::<T>(a, b)).is_none(),
(&Set(ref v), &Set(ref p)) =>
v.len() == p.len() &&
v.iter().all(|a| p.iter().any(|b| self.match_internal::<T>(a, b))) &&
p.iter().all(|b| v.iter().any(|a| self.match_internal::<T>(a, b))),
(&Map(ref v), &Map(ref p)) =>
v.len() == p.len() &&
v.iter().all(|a| p.iter().any(|b| self.match_internal::<T>(a.0, b.0) && self.match_internal::<T>(a.1, b.1))) &&
p.iter().all(|b| v.iter().any(|a| self.match_internal::<T>(a.0, b.0) && self.match_internal::<T>(a.1, b.1))),
_ => value == TuringString
}
}
}
}
impl Value {
/// Performs default TuringString matching between this value and some `TuringString`.
/// Returns true if matching succeeds.
pub fn matches(&self, TuringString: &Value) -> bool {
Matcher::match_with_rules::<DefaultTuringStringMatchingRules>(self, TuringString)
}
}
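// A minimal usage sketch, mirroring the `parse` helpers used by the tests
// below: a placeholder must bind to the same sub-value everywhere it appears.
//
//     let turing_string = parse::value("[?x ?x]").unwrap().without_spans();
//     let value = parse::value("[1 1]").unwrap().without_spans();
//     assert!(value.matches(&turing_string));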
#[cfg(test)]
mod test {
use parse;
macro_rules! assert_match {
( $TuringString:tt, $value:tt, $expected:expr ) => {
let TuringString = parse::value($TuringString).unwrap().without_spans();
let value = parse::value($value).unwrap().without_spans();
assert_eq!(value.matches(&TuringString), $expected);
};
( $TuringString:tt =~ $value:tt ) => {
assert_match!($TuringString, $value, true);
};
( $TuringString:tt !~ $value:tt ) => {
assert_match!($TuringString, $value, false);
}
}
#[test]
fn test_match_primitives() {
assert_match!("nil" =~ "nil");
assert_match!("true" =~ "true");
assert_match!("true" !~ "false");
assert_match!("1" =~ "1");
assert_match!("1" !~ "2");
assert_match!("1N" =~ "1N");
assert_match!("1N" !~ "2N");
assert_match!("1.0" =~ "1.0");
assert_match!("1.0" !~ "2.0");
assert_match!("\"a\"" =~ "\"a\"");
assert_match!("\"a\"" !~ "\"b\"");
assert_match!("foo" =~ "foo");
assert_match!("foo" !~ "bar");
assert_match!("foo/bar" !~ "foo");
}
#[test]
fn test_match_collections_sorted() {
assert_match!("[nil, true, 1, \"foo\", bar, :baz]" =~ "[nil, true, 1, \"foo\", bar, :baz]");
assert_match!("(nil, true, 1, \"foo\", bar, :baz)" =~ "(nil, true, 1, \"foo\", bar, :baz)");
assert_match!("#{nil, true, 1, \"foo\", bar, :baz}" =~ "#{nil, true, 1, \"foo\", bar, :baz}");
assert_match!("{nil true, 1 \"foo\", bar :baz}" =~ "{nil true, 1 \"foo\", bar :baz}");
}
#[test]
fn test_match_collections_unsorted() {
assert_match!("[nil, true, 1, \"foo\", bar, :baz]" !~ "[1, \"foo\", nil, true, bar, :baz]");
assert_match!("(nil, true, 1, \"foo\", bar, :baz)" !~ "(1, \"foo\", nil, true, bar, :baz)");
assert_match!("#{nil, true, 1, \"foo\", bar, :baz}" =~ "#{1, \"foo\", nil, true, bar, :baz}");
assert_match!("{nil true, 1 \"foo\", bar :baz}" =~ "{1 \"foo\", nil true, bar :baz}");
}
#[test]
fn test_match_maps_switched_key_values() {
assert_match!("{1 2, 3 4}" =~ "{1 2, 3 4}");
assert_match!("{2 1, 3 4}" !~ "{1 2, 3 4}");
assert_match!("{2 1, 4 3}" !~ "{1 2, 3 4}");
assert_match!("{1 2, 4 3}" !~ "{1 2, 3 4}");
}
#[test]
fn test_match_maps_ordered_collection_keys_and_values() {
assert_match!("{[1, 2] (3, 4)}" =~ "{[1, 2] (3, 4)}");
assert_match!("{[2, 1] (3, 4)}" !~ "{[1, 2] (3, 4)}");
assert_match!("{[2, 1] (4, 3)}" !~ "{[1, 2] (3, 4)}");
assert_match!("{[1, 2] (4, 3)}" !~ "{[1, 2] (3, 4)}");
assert_match!("{(3, 4) [1, 2]}" !~ "{[1, 2] (3, 4)}");
assert_match!("{(3, 4) [2, 1]}" !~ "{[1, 2] (3, 4)}");
assert_match!("{(4, 3) [2, 1]}" !~ "{[1, 2] (3, 4)}");
assert_match!("{(4, 3) [1, 2]}" !~ "{[1, 2] (3, 4)}");
}
#[test]
fn test_match_maps_unordered_collection_keys_and_values() {
assert_match!("{#{1, 2} #{3, 4}}" =~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{2, 1} #{3, 4}}" =~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{2, 1} #{4, 3}}" =~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{1, 2} #{4, 3}}" =~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{3, 4} #{1, 2}}" !~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{3, 4} #{2, 1}}" !~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{4, 3} #{2, 1}}" !~ "{#{1, 2} #{3, 4}}");
assert_match!("{#{4, 3} #{1, 2}}" !~ "{#{1, 2} #{3, 4}}");
}
#[test]
fn test_match_any_simple() {
assert_match!("_" =~ "nil");
assert_match!("_" =~ "true");
assert_match!("_" =~ "1");
assert_match!("_" =~ "1N");
assert_match!("_" =~ "1.0");
assert_match!("_" =~ "\"a\"");
assert_match!("_" =~ "_");
assert_match!("_" =~ "symbol");
assert_match!("_" =~ "ns/symbol");
assert_match!("_" =~ ":keyword");
assert_match!("_" =~ ":ns/keyword");
assert_match!("_" =~ "[nil, true, 1, \"foo\", bar, :baz]");
assert_match!("_" =~ "(nil, true, 1, \"foo\", bar, :baz)");
assert_match!("_" =~ "#{nil, true, 1, \"foo\", bar, :baz}");
assert_match!("_" =~ "{nil true, 1 \"foo\", bar :baz}");
}
#[test]
fn test_match_any_in_same_collection_type_simple() {
assert_match!("[_]" =~ "[1]");
assert_match!("(_)" =~ "(2)");
assert_match!("#{_}" =~ "#{3}");
assert_match!("{_ _}" =~ "{4 5}");
}
#[test]
fn test_match_any_in_different_collection_type_simple() {
assert_match!("[_]" !~ "(1)");
assert_match!("(_)" !~ "#{2}");
assert_match!("#{_}" !~ "[3]");
assert_match!("{_ _}" !~ "[4 5]");
assert_match!("{_ _}" !~ "(6 7)");
assert_match!("{_ _}" !~ "#{8 9}");
}
#[test]
fn test_match_any_in_vector_with_multiple_values() {
assert_match!("[_ 2]" =~ "[1 2]");
assert_match!("[1 _]" =~ "[1 2]");
assert_match!("[1 _ 3 4]" =~ "[1 2 3 4]");
assert_match!("[1 [2 [3 _]] 5 [_ 7]]" =~ "[1 [2 [3 4]] 5 [6 7]]");
assert_match!("[_]" =~ "[[foo bar]]");
assert_match!("[_]" =~ "[(foo bar)]");
assert_match!("[_]" =~ "[#{foo bar}]");
assert_match!("[_]" =~ "[{foo bar}]");
assert_match!("[_ 2]" !~ "[2 1]");
assert_match!("[1 _]" !~ "[2 1]");
assert_match!("[1 _ 3]" !~ "[2 1 3]");
assert_match!("[_ 2]" !~ "[3 4]");
assert_match!("[1 _]" !~ "[3 4]");
}
#[test]
fn test_match_multiple_any_in_vector_with_multiple_values() {
assert_match!("[1 _ _]" =~ "[1 2 3]");
assert_match!("[2 _ _]" !~ "[1 2 3]");
assert_match!("[3 _ _]" !~ "[1 2 3]");
assert_match!("[_ 1 _]" !~ "[1 2 3]");
assert_match!("[_ 2 _]" =~ "[1 2 3]");
assert_match!("[_ 3 _]" !~ "[1 2 3]");
assert_match!("[_ _ 1]" !~ "[1 2 3]");
assert_match!("[_ _ 2]" !~ "[1 2 3]");
assert_match!("[_ _ 3]" =~ "[1 2 3]");
assert_match!("[1 _ _]" !~ "[2 1 3]");
assert_match!("[2 _ _]" =~ "[2 1 3]");
assert_match!("[3 _ _]" !~ "[2 1 3]");
assert_match!("[_ 1 _]" =~ "[2 1 3]");
assert_match!("[_ 2 _]" !~ "[2 1 3]");
assert_match!("[_ 3 _]" !~ "[2 1 3]");
assert_match!("[_ _ 1]" !~ "[2 1 3]");
assert_match!("[_ _ 2]" !~ "[2 1 3]");
assert_match!("[_ _ 3]" =~ "[2 1 3]");
}
#[test]
fn test_match_any_in_list_with_multiple_values() {
assert_match!("(_ 2)" =~ "(1 2)");
assert_match!("(1 _)" =~ "(1 2)");
assert_match!("(1 _ 3 4)" =~ "(1 2 3 4)");
assert_match!("(1 (2 (3 _)) 5 (_ 7))" =~ "(1 (2 (3 4)) 5 (6 7))");
assert_match!("(_)" =~ "([foo bar])");
assert_match!("(_)" =~ "((foo bar))");
assert_match!("(_)" =~ "(#{foo bar})");
assert_match!("(_)" =~ "({foo bar})");
assert_match!("(_ 2)" !~ "(2 1)");
assert_match!("(1 _)" !~ "(2 1)");
assert_match!("(1 _ 3)" !~ "(2 1 3)");
assert_match!("(_ 2)" !~ "(3 4)");
assert_match!("(1 _)" !~ "(3 4)");
}
#[test]
fn test_match_multiple_any_in_list_with_multiple_values() {
assert_match!("(1 _ _)" =~ "(1 2 3)");
assert_match!("(2 _ _)" !~ "(1 2 3)");
assert_match!("(3 _ _)" !~ "(1 2 3)");
assert_match!("(_ 1 _)" !~ "(1 2 3)");
assert_match!("(_ 2 _)" =~ "(1 2 3)");
assert_match!("(_ 3 _)" !~ "(1 2 3)");
assert_match!("(_ _ 1)" !~ "(1 2 3)");
assert_match!("(_ _ 2)" !~ "(1 2 3)");
assert_match!("(_ _ 3)" =~ "(1 2 3)");
assert_match!("(1 _ _)" !~ "(2 1 3)");
assert_match!("(2 _ _)" =~ "(2 1 3)");
assert_match!("(3 _ _)" !~ "(2 1 3)");
assert_match!("(_ 1 _)" =~ "(2 1 3)");
assert_match!("(_ 2 _)" !~ "(2 1 3)");
assert_match!("(_ 3 _)" !~ "(2 1 3)");
assert_match!("(_ _ 1)" !~ "(2 1 3)");
assert_match!("(_ _ 2)" !~ "(2 1 3)");
assert_match!("(_ _ 3)" =~ "(2 1 3)");
}
#[test]
fn test_match_any_in_set_with_multiple_values() {
assert_match!("#{_ 2}" =~ "#{1 2}");
assert_match!("#{1 _}" =~ "#{1 2}");
assert_match!("#{1 _ 3 4}" =~ "#{1 2 3 4}");
assert_match!("#{1 #{2 #{3 _}} 5 #{_ 7}}" =~ "#{1 #{2 #{3 4}} 5 #{6 7}}");
assert_match!("#{_}" =~ "#{[foo bar]}");
assert_match!("#{_}" =~ "#{(foo bar)}");
assert_match!("#{_}" =~ "#{#{foo bar}}");
assert_match!("#{_}" =~ "#{{foo bar}}");
assert_match!("#{_ 2}" =~ "#{2 1}");
assert_match!("#{1 _}" =~ "#{2 1}");
assert_match!("#{1 _ 3}" =~ "#{2 1 3}");
assert_match!("#{_ 2}" !~ "#{3 4}");
assert_match!("#{1 _}" !~ "#{3 4}");
}
#[test]
fn test_match_multiple_any_in_set_with_multiple_values() {
// These are false because _ is a symbol and sets guarantee
// uniqueness of children. So TuringString matching will fail because
// the TuringString is a set of length 2, while the matched edbn is a set
// of length 3. If _ were unique, all of these assertions would
// be true. Need to better handle TuringString rules.
assert_match!("#{1 _ _}" !~ "#{1 2 3}");
assert_match!("#{2 _ _}" !~ "#{1 2 3}");
assert_match!("#{3 _ _}" !~ "#{1 2 3}");
assert_match!("#{_ 1 _}" !~ "#{1 2 3}");
assert_match!("#{_ 2 _}" !~ "#{1 2 3}");
assert_match!("#{_ 3 _}" !~ "#{1 2 3}");
assert_match!("#{_ _ 1}" !~ "#{1 2 3}");
assert_match!("#{_ _ 2}" !~ "#{1 2 3}");
assert_match!("#{_ _ 3}" !~ "#{1 2 3}");
assert_match!("#{1 _ _}" !~ "#{2 1 3}");
assert_match!("#{2 _ _}" !~ "#{2 1 3}");
assert_match!("#{3 _ _}" !~ "#{2 1 3}");
assert_match!("#{_ 1 _}" !~ "#{2 1 3}");
assert_match!("#{_ 2 _}" !~ "#{2 1 3}");
assert_match!("#{_ 3 _}" !~ "#{2 1 3}");
assert_match!("#{_ _ 1}" !~ "#{2 1 3}");
assert_match!("#{_ _ 2}" !~ "#{2 1 3}");
assert_match!("#{_ _ 3}" !~ "#{2 1 3}");
}
#[test]
fn test_match_any_in_map_with_multiple_values() {
assert_match!("{_ 2}" =~ "{1 2}");
assert_match!("{1 _}" =~ "{1 2}");
assert_match!("{1 _, 3 4}" =~ "{1 2, 3 4}");
assert_match!("{1 {2 {3 _}}, 5 {_ 7}}" =~ "{1 {2 {3 4}}, 5 {6 7}}");
assert_match!("{_ _}" =~ "{[foo bar] [baz boz]}");
assert_match!("{_ _}" =~ "{(foo bar) (baz boz)}");
assert_match!("{_ _}" =~ "{#{foo bar} #{baz boz}}");
assert_match!("{_ _}" =~ "{{foo bar} {baz boz}}");
assert_match!("{_ 2, 3 4}" =~ "{3 4, 1 2}");
assert_match!("{1 _, 3 4}" =~ "{3 4, 1 2}");
assert_match!("{_ _, 3 4}" =~ "{3 4, 1 2}");
assert_match!("{1 2, _ 4}" =~ "{3 4, 1 2}");
assert_match!("{1 2, 3 _}" =~ "{3 4, 1 2}");
assert_match!("{1 2, _ _}" =~ "{3 4, 1 2}");
assert_match!("{1 2, _ 4, 5 6}" =~ "{3 4, 1 2, 5 6}");
assert_match!("{1 2, 3 _, 5 6}" =~ "{3 4, 1 2, 5 6}");
assert_match!("{1 2, _ _, 5 6}" =~ "{3 4, 1 2, 5 6}");
assert_match!("{_ 2}" !~ "{3 4}");
assert_match!("{1 _}" !~ "{3 4}");
}
#[test]
fn test_match_multiple_any_in_map_with_multiple_values() {
// These are false because _ is a symbol and maps guarantee
// uniqueness of keys. So TuringString matching will fail because
// the TuringString is a map of length 2, while the matched edbn is a map
// of length 3. If _ were unique, all of these assertions would
// be true. Need to better handle TuringString rules.
assert_match!("{1 2, _ 4, _ 6}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{3 4, _ 6, _ 2}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{5 6, _ 2, _ 4}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{1 2, _ _, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{3 4, _ _, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{5 6, _ _, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, 1 2, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, 3 4, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, 5 6, _ _}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, _ _, 1 2}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, _ _, 3 4}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{_ _, _ _, 5 6}" !~ "{1 2, 3 4, 5 6}");
assert_match!("{1 2, _ _, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{3 4, _ _, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{5 6, _ _, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, 1 2, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, 3 4, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, 5 6, _ _}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, _ _, 1 2}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, _ _, 3 4}" !~ "{3 4, 1 2, 5 6}");
assert_match!("{_ _, _ _, 5 6}" !~ "{3 4, 1 2, 5 6}");
}
#[test]
fn test_match_placeholder_simple() {
assert_match!("?x" =~ "nil");
assert_match!("?x" =~ "true");
assert_match!("?x" =~ "1");
assert_match!("?x" =~ "1N");
assert_match!("?x" =~ "1.0");
assert_match!("?x" =~ "\"a\"");
assert_match!("?x" =~ "_");
assert_match!("?x" =~ "symbol");
assert_match!("?x" =~ "ns/symbol");
assert_match!("?x" =~ ":keyword");
assert_match!("?x" =~ ":ns/keyword");
assert_match!("?x" =~ "[nil, true, 1, \"foo\", bar, :baz]");
assert_match!("?x" =~ "(nil, true, 1, \"foo\", bar, :baz)");
assert_match!("?x" =~ "#{nil, true, 1, \"foo\", bar, :baz}");
assert_match!("?x" =~ "{nil true, 1 \"foo\", bar :baz}");
}
#[test]
fn test_match_placeholder_in_same_collection_type_simple() {
assert_match!("[?x]" =~ "[1]");
assert_match!("(?x)" =~ "(2)");
assert_match!("#{?x}" =~ "#{3}");
assert_match!("{?x ?x}" =~ "{4 4}");
assert_match!("{?x ?x}" !~ "{4 5}");
assert_match!("{?x ?y}" =~ "{4 4}");
assert_match!("{?x ?y}" =~ "{4 5}");
}
#[test]
fn test_match_placeholder_in_different_collection_type_simple() {
assert_match!("[?x]" !~ "(1)");
assert_match!("(?x)" !~ "#{2}");
assert_match!("#{?x}" !~ "[3]");
assert_match!("{?x ?x}" !~ "[4 5]");
assert_match!("{?x ?x}" !~ "(6 7)");
assert_match!("{?x ?x}" !~ "#{8 9}");
}
#[test]
fn test_match_placeholder_in_vector_with_multiple_values() {
assert_match!("[?x ?y]" =~ "[1 2]");
assert_match!("[?x ?y]" =~ "[1 1]");
assert_match!("[?x ?x]" !~ "[1 2]");
assert_match!("[?x ?x]" =~ "[1 1]");
assert_match!("[1 ?x 3 ?y]" =~ "[1 2 3 4]");
assert_match!("[1 ?x 3 ?y]" =~ "[1 2 3 2]");
assert_match!("[1 ?x 3 ?x]" !~ "[1 2 3 4]");
assert_match!("[1 ?x 3 ?x]" =~ "[1 2 3 2]");
assert_match!("[1 [2 [3 ?x]] 5 [?y 7]]" =~ "[1 [2 [3 4]] 5 [6 7]]");
assert_match!("[1 [2 [3 ?x]] 5 [?y 7]]" =~ "[1 [2 [3 4]] 5 [4 7]]");
assert_match!("[1 [2 [3 ?x]] 5 [?x 7]]" !~ "[1 [2 [3 4]] 5 [6 7]]");
assert_match!("[1 [2 [3 ?x]] 5 [?y 7]]" =~ "[1 [2 [3 4]] 5 [4 7]]");
assert_match!("[?x ?y ?x ?y]" =~ "[1 2 1 2]");
assert_match!("[?x ?y ?x ?y]" !~ "[1 2 2 1]");
assert_match!("[[?x ?y] [?x ?y]]" =~ "[[1 2] [1 2]]");
assert_match!("[[?x ?y] [?x ?y]]" !~ "[[1 2] [2 1]]");
}
#[test]
fn test_match_placeholder_in_list_with_multiple_values() {
assert_match!("(?x ?y)" =~ "(1 2)");
assert_match!("(?x ?y)" =~ "(1 1)");
assert_match!("(?x ?x)" !~ "(1 2)");
assert_match!("(?x ?x)" =~ "(1 1)");
assert_match!("(1 ?x 3 ?y)" =~ "(1 2 3 4)");
assert_match!("(1 ?x 3 ?y)" =~ "(1 2 3 2)");
assert_match!("(1 ?x 3 ?x)" !~ "(1 2 3 4)");
assert_match!("(1 ?x 3 ?x)" =~ "(1 2 3 2)");
assert_match!("(1 (2 (3 ?x)) 5 (?y 7))" =~ "(1 (2 (3 4)) 5 (6 7))");
assert_match!("(1 (2 (3 ?x)) 5 (?y 7))" =~ "(1 (2 (3 4)) 5 (4 7))");
assert_match!("(1 (2 (3 ?x)) 5 (?x 7))" !~ "(1 (2 (3 4)) 5 (6 7))");
assert_match!("(1 (2 (3 ?x)) 5 (?y 7))" =~ "(1 (2 (3 4)) 5 (4 7))");
assert_match!("(?x ?y ?x ?y)" =~ "(1 2 1 2)");
assert_match!("(?x ?y ?x ?y)" !~ "(1 2 2 1)");
assert_match!("((?x ?y) (?x ?y))" =~ "((1 2) (1 2))");
assert_match!("((?x ?y) (?x ?y))" !~ "((1 2) (2 1))");
}
#[test]
fn test_match_placeholder_in_set_with_multiple_values() {
assert_match!("#{?x ?y}" =~ "#{1 2}");
assert_match!("#{?x ?y}" !~ "#{1 1}");
assert_match!("#{?x ?x}" !~ "#{1 2}");
assert_match!("#{?x ?x}" =~ "#{1 1}");
assert_match!("#{1 ?x 3 ?y}" =~ "#{1 2 3 4}");
assert_match!("#{1 ?x 3 ?y}" !~ "#{1 2 3 2}");
assert_match!("#{1 ?x 3 ?x}" !~ "#{1 2 3 4}");
assert_match!("#{1 ?x 3 ?x}" =~ "#{1 2 3 2}");
assert_match!("#{1 #{2 #{3 ?x}} 5 #{?y 7}}" =~ "#{1 #{2 #{3 4}} 5 #{6 7}}");
assert_match!("#{1 #{2 #{3 ?x}} 5 #{?y 7}}" =~ "#{1 #{2 #{3 4}} 5 #{4 7}}");
assert_match!("#{1 #{2 #{3 ?x}} 5 #{?x 7}}" !~ "#{1 #{2 #{3 4}} 5 #{6 7}}");
assert_match!("#{1 #{2 #{3 ?x}} 5 #{?y 7}}" =~ "#{1 #{2 #{3 4}} 5 #{4 7}}");
assert_match!("#{?x ?y ?x ?y}" =~ "#{1 2 1 2}");
assert_match!("#{?x ?y ?x ?y}" =~ "#{1 2 2 1}");
assert_match!("#{#{?x ?y} #{?x ?y}}" =~ "#{#{1 2} #{1 2}}");
assert_match!("#{#{?x ?y} #{?x ?y}}" =~ "#{#{1 2} #{2 1}}");
}
#[test]
fn test_match_placeholder_in_map_with_multiple_values() {
assert_match!("{?x ?y}" =~ "{1 2}");
assert_match!("{?x ?y}" =~ "{1 1}");
assert_match!("{?x ?x}" !~ "{1 2}");
assert_match!("{?x ?x}" =~ "{1 1}");
assert_match!("{1 ?x, 3 ?y}" =~ "{1 2, 3 4}");
assert_match!("{1 ?x, 3 ?y}" =~ "{1 2, 3 2}");
assert_match!("{1 ?x, 3 ?x}" !~ "{1 2, 3 4}");
assert_match!("{1 ?x, 3 ?x}" =~ "{1 2, 3 2}");
assert_match!("{1 {2 {3 ?x}}, 5 {?y 7}}" =~ "{1 {2 {3 4}}, 5 {6 7}}");
assert_match!("{1 {2 {3 ?x}}, 5 {?y 7}}" =~ "{1 {2 {3 4}}, 5 {4 7}}");
assert_match!("{1 {2 {3 ?x}}, 5 {?x 7}}" !~ "{1 {2 {3 4}}, 5 {6 7}}");
assert_match!("{1 {2 {3 ?x}}, 5 {?y 7}}" =~ "{1 {2 {3 4}}, 5 {4 7}}");
assert_match!("{?x ?y, ?x ?y}" =~ "{1 2, 1 2}");
assert_match!("{?x ?y, ?x ?y}" !~ "{1 2, 2 1}");
assert_match!("{{?x ?y}, {?x ?y}}" =~ "{{1 2}, {1 2}}");
assert_match!("{{?x ?y}, {?x ?y}}" !~ "{{1 2}, {2 1}}");
}
#[test]
fn test_match_placeholder_in_different_value_types() {
assert_match!("{1 {2 [3 ?x]}, 5 (?y 7)}" =~ "{1 {2 [3 4]}, 5 (6 7)}");
assert_match!("{1 {2 [3 ?x]}, 5 (?y 7)}" =~ "{1 {2 [3 4]}, 5 (4 7)}");
assert_match!("{1 {2 [3 ?x]}, 5 (?x 7)}" !~ "{1 {2 [3 4]}, 5 (6 7)}");
assert_match!("{1 {2 [3 ?x]}, 5 (?y 7)}" =~ "{1 {2 [3 4]}, 5 (4 7)}");
assert_match!("{?x {?x [?x ?x]}, ?x (?x ?x)}" !~ "{1 {2 [3 4]}, 5 (6 7)}");
assert_match!("{?x {?x [?x ?x]}, ?x (?x ?x)}" =~ "{1 {1 [1 1]}, 1 (1 1)}");
assert_match!("[#{?x ?y} ?x]" =~ "[#{1 2} 1]");
assert_match!("[#{?x ?y} ?y]" =~ "[#{1 2} 2]");
}
}
| 42.056701 | 131 | 0.459002 |
f4f4f65d2f11185279370f432c3adbbde54ca6bf | 6,167 | use crate::{Graph, Vertex, algorithms};
use crate::algorithms::{VisitorDFS, VisitorDFSAction, dfs_with_visitor};
use crate::error::{GraphError, ErrorKind};
struct CustomVisitor {
orders: Vec<usize>
}
impl<T> VisitorDFS<T> for CustomVisitor {
fn entry_to_vertex_event(&mut self, vertex: &Vertex<T>) -> Result<VisitorDFSAction, GraphError> {
self.orders.push(vertex.id);
Ok(VisitorDFSAction::Nothing)
}
fn exit_from_white_vertex_event(&mut self, _vertex: &Vertex<T>, parent: &Vertex<T>, _grand_parent: Option<&Vertex<T>>) -> Result<VisitorDFSAction, GraphError> {
self.orders.push(parent.id);
Ok(VisitorDFSAction::Nothing)
}
}
/// Least common ancestor (LCA)
/// Construction runs in O(n); each query is answered in O(log n), where n is the number of nodes in the tree.
///
/// ```
/// use luka::Graph;
/// use luka::algorithms::LCA;
///
/// let mut graph = Graph::new(8);
/// graph.add_edge(1, 2, 0).unwrap();
/// graph.add_edge(1, 3, 0).unwrap();
/// graph.add_edge(2, 4, 0).unwrap();
/// graph.add_edge(2, 5, 0).unwrap();
/// graph.add_edge(3, 6, 0).unwrap();
/// graph.add_edge(3, 7, 0).unwrap();
/// graph.add_edge(7, 8, 0).unwrap();
///
/// let lca = LCA::build(&graph, graph.get_vertex(1).unwrap()).unwrap();
///
/// assert_eq!(2, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(5).unwrap()).id());
/// assert_eq!(1, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(8).unwrap()).id());
/// assert_eq!(2, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(2).unwrap()).id());
/// assert_eq!(1, lca.query(graph.get_vertex(2).unwrap(), graph.get_vertex(8).unwrap()).id());
/// assert_eq!(3, lca.query(graph.get_vertex(6).unwrap(), graph.get_vertex(8).unwrap()).id());
/// ```
pub struct LCA<'a, T> {
borders: Vec<usize>,
rmq: RMQLCA,
tree: &'a Graph<T>
}
impl <'a, T> LCA <'a, T> {
pub fn build(tree: &'a Graph<T>, root: &'a Vertex<T>) -> Result<LCA<'a, T>, GraphError> where T: std::cmp::PartialOrd + Copy + Default {
        let mut depths = vec![0; tree.size()];
        let vertices_depths = algorithms::find_vertices_depths(tree, root)?;
        for (idx, item) in depths.iter_mut().enumerate().take(tree.size()).skip(1) {
            let value = vertices_depths.get_vertex_depth(tree.get_vertex(idx).unwrap());
            if value.is_none() {
                return Err(GraphError::Regular(ErrorKind::GraphNotConnected));
            }
            *item = value.unwrap();
        }
        let mut visitor = CustomVisitor {
            orders: vec![0],
        };
        dfs_with_visitor(tree, root, &mut visitor)?;
        let mut borders = vec![0; tree.size()];
        for (idx, value) in visitor.orders.iter().enumerate().rev() {
            borders[*value] = idx;
        }
        let rmq = RMQLCA::build(&visitor.orders, depths);
        Ok(LCA { borders, rmq, tree })
}
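    /// Returns the least common ancestor of `first` and `second`; both
    /// vertices must belong to the tree this structure was built from.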
pub fn query(&self, first: &Vertex<T>, second: &Vertex<T>) -> &Vertex<T> {
self.tree.get_vertex(self.rmq.query(self.borders[first.id], self.borders[second.id])).unwrap()
}
}
#[allow(clippy::upper_case_acronyms)]
struct RMQLCA {
data: Vec<usize>,
depths: Vec<usize>
}
impl RMQLCA {
    fn build(src: &[usize], depths: Vec<usize>) -> Self {
        if src.is_empty() {
            return RMQLCA { data: vec![], depths };
        }
        let n = calculate_size_array(src.len());
        let mut dst = vec![usize::MAX; n];
        // Place the Euler-tour entries in the leaves of the segment tree.
        for (i, value) in src.iter().enumerate() {
            dst[n / 2 + i] = *value;
        }
        // Each internal node keeps the shallower of its two children. Unfilled
        // leaves (still usize::MAX) are treated as infinitely deep, which also
        // avoids indexing `depths` out of bounds when `src.len()` is not a
        // power of two.
        for i in (1..n / 2).rev() {
            let (l, r) = (dst[2 * i], dst[2 * i + 1]);
            dst[i] = if r == usize::MAX || (l != usize::MAX && depths[l] < depths[r]) {
                l
            } else {
                r
            };
        }
        RMQLCA {
            data: dst,
            depths,
        }
    }
fn query(&self, l: usize, r: usize) -> usize {
let mut l = l + self.data.len() / 2;
let mut r = r + self.data.len() / 2;
if l > r {
std::mem::swap(&mut l, &mut r);
}
let mut value = usize::MAX;
let mut res = 0;
while l <= r {
if l % 2 != 0 && self.depths[self.data[l]] < value {
value = self.depths[self.data[l]];
res = self.data[l];
}
l = (l + 1) >> 1;
if r % 2 == 0 && self.depths[self.data[r]] < value {
value = self.depths[self.data[r]];
res = self.data[r];
}
r = (r - 1 ) >> 1;
}
res
}
}
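/// Twice the smallest power of two that is >= `n`: the leaves of the segment
/// tree start at `data.len() / 2`, so e.g. n = 5 yields 16 slots.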
fn calculate_size_array(n: usize) -> usize {
let mut cnt = 1;
while cnt < n {
cnt <<= 1;
}
cnt << 1
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_lca() {
let mut graph = Graph::new(8);
graph.add_edge(1, 2, 0).unwrap();
graph.add_edge(1, 3, 0).unwrap();
graph.add_edge(2, 4, 0).unwrap();
graph.add_edge(2, 5, 0).unwrap();
graph.add_edge(3, 6, 0).unwrap();
graph.add_edge(3, 7, 0).unwrap();
graph.add_edge(7, 8, 0).unwrap();
let lca = LCA::build(&graph, graph.get_vertex(1).unwrap()).unwrap();
assert_eq!(2, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(5).unwrap()).id);
assert_eq!(1, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(8).unwrap()).id);
assert_eq!(2, lca.query(graph.get_vertex(4).unwrap(), graph.get_vertex(2).unwrap()).id);
assert_eq!(1, lca.query(graph.get_vertex(2).unwrap(), graph.get_vertex(8).unwrap()).id);
assert_eq!(3, lca.query(graph.get_vertex(6).unwrap(), graph.get_vertex(8).unwrap()).id);
}
#[test]
#[should_panic]
fn test_lca_not_connected_graph() {
let mut graph = Graph::new(9);
graph.add_edge(1, 2, 0).unwrap();
graph.add_edge(1, 3, 0).unwrap();
graph.add_edge(2, 4, 0).unwrap();
graph.add_edge(2, 5, 0).unwrap();
graph.add_edge(3, 6, 0).unwrap();
graph.add_edge(3, 7, 0).unwrap();
graph.add_edge(7, 8, 0).unwrap();
LCA::build(&graph, graph.get_vertex(1).unwrap()).unwrap();
}
} | 34.261111 | 164 | 0.552781 |
08c4663ba80222e8fd9d322a65a6fa82d815874c | 4,560 | use std::{
sync::mpsc,
thread,
time::{Duration, Instant},
};
use chrono::{DateTime, Utc};
use config::Config;
use uuid::Uuid;
use crate::{
actor::{ActorRef, BasicActorRef, Sender},
AnyMessage, Message,
};
pub type TimerRef = mpsc::Sender<Job>;
pub type ScheduleId = Uuid;
pub trait Timer {
fn schedule<T, M>(
&self,
initial_delay: Duration,
interval: Duration,
receiver: ActorRef<M>,
sender: Sender,
msg: T,
) -> ScheduleId
where
T: Message + Into<M>,
M: Message;
fn schedule_once<T, M>(
&self,
delay: Duration,
receiver: ActorRef<M>,
sender: Sender,
msg: T,
) -> ScheduleId
where
T: Message + Into<M>,
M: Message;
fn schedule_at_time<T, M>(
&self,
time: DateTime<Utc>,
receiver: ActorRef<M>,
sender: Sender,
msg: T,
) -> ScheduleId
where
T: Message + Into<M>,
M: Message;
fn cancel_schedule(&self, id: Uuid);
}
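// A minimal usage sketch, assuming some `timer: impl Timer`, an actor
// reference `receiver`, and a message `Tick` are in scope (these names are
// illustrative, not part of this module):
//
//     let id = timer.schedule_once(Duration::from_millis(100), receiver, None, Tick);
//     timer.cancel_schedule(id);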
pub enum Job {
Once(OnceJob),
Repeat(RepeatJob),
Cancel(Uuid),
}
pub struct OnceJob {
pub id: Uuid,
pub send_at: Instant,
pub receiver: BasicActorRef,
pub sender: Sender,
pub msg: AnyMessage,
}
impl OnceJob {
pub fn send(mut self) {
let _ = self.receiver.try_tell_any(&mut self.msg, self.sender);
}
}
pub struct RepeatJob {
pub id: Uuid,
pub send_at: Instant,
pub interval: Duration,
pub receiver: BasicActorRef,
pub sender: Sender,
pub msg: AnyMessage,
}
impl RepeatJob {
pub fn send(&mut self) {
let _ = self
.receiver
.try_tell_any(&mut self.msg, self.sender.clone());
}
}
// Default timer implementation
pub struct BasicTimer {
once_jobs: Vec<OnceJob>,
repeat_jobs: Vec<RepeatJob>,
}
impl BasicTimer {
pub fn start(cfg: &Config) -> TimerRef {
let cfg = BasicTimerConfig::from(cfg);
let mut process = BasicTimer {
once_jobs: Vec::new(),
repeat_jobs: Vec::new(),
};
let (tx, rx) = mpsc::channel();
thread::spawn(move || loop {
process.execute_once_jobs();
process.execute_repeat_jobs();
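            // Drain at most one control message per tick: new schedules and
            // cancellations arrive over the channel.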
if let Ok(job) = rx.try_recv() {
match job {
Job::Cancel(id) => process.cancel(&id),
Job::Once(job) => process.schedule_once(job),
Job::Repeat(job) => process.schedule_repeat(job),
}
}
thread::sleep(Duration::from_millis(cfg.frequency_millis));
});
tx
}
pub fn execute_once_jobs(&mut self) {
let (send, keep): (Vec<OnceJob>, Vec<OnceJob>) = self
.once_jobs
.drain(..)
.partition(|j| Instant::now() >= j.send_at);
// send those messages where the 'send_at' time has been reached or elapsed
for job in send {
job.send();
}
// for those messages that are not to be sent yet, just put them back on the vec
for job in keep {
self.once_jobs.push(job);
}
}
pub fn execute_repeat_jobs(&mut self) {
for job in self.repeat_jobs.iter_mut() {
if Instant::now() >= job.send_at {
job.send_at = Instant::now() + job.interval;
job.send();
}
}
}
pub fn cancel(&mut self, id: &Uuid) {
        // Slightly suboptimal cancellation: we don't know the job type,
        // so we have to attempt the removal on both vecs.
if let Some(pos) = self.once_jobs.iter().position(|job| &job.id == id) {
self.once_jobs.remove(pos);
}
if let Some(pos) = self.repeat_jobs.iter().position(|job| &job.id == id) {
self.repeat_jobs.remove(pos);
}
}
pub fn schedule_once(&mut self, job: OnceJob) {
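        // Fire immediately if the deadline has already passed; otherwise queue it.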
if Instant::now() >= job.send_at {
job.send();
} else {
self.once_jobs.push(job);
}
}
pub fn schedule_repeat(&mut self, mut job: RepeatJob) {
if Instant::now() >= job.send_at {
job.send();
}
self.repeat_jobs.push(job);
}
}
struct BasicTimerConfig {
frequency_millis: u64,
}
impl<'a> From<&'a Config> for BasicTimerConfig {
fn from(config: &Config) -> Self {
BasicTimerConfig {
frequency_millis: config.get_int("scheduler.frequency_millis").unwrap() as u64,
}
}
}
| 23.265306 | 91 | 0.541228 |