hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
288f0f14bb0f7e08ed76d3e48a4e1ac59066448c
2,227
use core::pin::Pin; use futures_core::stream::{FusedStream, Stream}; use futures_core::task::{Waker, Poll}; use futures_sink::Sink; use pin_utils::{unsafe_pinned, unsafe_unpinned}; /// Stream for the [`skip`](super::StreamExt::skip) method. #[derive(Debug)] #[must_use = "streams do nothing unless polled"] pub struct Skip<St> { stream: St, remaining: u64, } impl<St: Unpin> Unpin for Skip<St> {} impl<St: Stream> Skip<St> { unsafe_pinned!(stream: St); unsafe_unpinned!(remaining: u64); pub(super) fn new(stream: St, n: u64) -> Skip<St> { Skip { stream, remaining: n, } } /// Acquires a reference to the underlying stream that this combinator is /// pulling from. pub fn get_ref(&self) -> &St { &self.stream } /// Acquires a mutable reference to the underlying stream that this /// combinator is pulling from. /// /// Note that care must be taken to avoid tampering with the state of the /// stream which may otherwise confuse this combinator. pub fn get_mut(&mut self) -> &mut St { &mut self.stream } /// Consumes this combinator, returning the underlying stream. /// /// Note that this may discard intermediate state of this combinator, so /// care should be taken to avoid losing resources when this is called. pub fn into_inner(self) -> St { self.stream } } impl<St: FusedStream> FusedStream for Skip<St> { fn is_terminated(&self) -> bool { self.stream.is_terminated() } } impl<St: Stream> Stream for Skip<St> { type Item = St::Item; fn poll_next( mut self: Pin<&mut Self>, waker: &Waker, ) -> Poll<Option<St::Item>> { while self.remaining > 0 { match ready!(self.as_mut().stream().poll_next(waker)) { Some(_) => *self.as_mut().remaining() -= 1, None => return Poll::Ready(None), } } self.as_mut().stream().poll_next(waker) } } // Forwarding impl of Sink from the underlying stream impl<S, Item> Sink<Item> for Skip<S> where S: Stream + Sink<Item>, { type SinkError = S::SinkError; delegate_sink!(stream, Item); }
26.2
77
0.608891
f554cc10e1f5f59e4c24463058728615dcc17e9e
523
use std::fs::File as OsFile; use std::io::{BufRead, BufReader}; use std::path::Path; pub struct File { file: Option<OsFile>, } impl File { pub fn new<T: AsRef<Path>>(path: T) -> File { match OsFile::open(path) { Err(_) => File { file: None }, Ok(file) => File { file: Some(file) }, } } } impl super::DataSource for File { fn iter(self) -> super::Data { match self.file { None => vec![], Some(file) => BufReader::new(file).lines().filter_map(Result::ok).collect(), } } }
20.115385
82
0.56979
76ba85875fb2b6f578ac99f2b65f2b4b31da92d2
18,272
#![cfg(feature = "serde")] //! This file, data.rs.in, and all files in benches/assets are from hematite_nbt: //! https://github.com/PistonDevelopers/hematite_nbt. extern crate criterion; extern crate nbt; extern crate quartz_nbt; extern crate serde; use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode, Throughput}; use fastnbt::{de::from_bytes, stream::Parser, ByteArray, LongArray}; use flate2::read::GzDecoder; use nbt::{de::from_gzip_reader, from_reader, ser::to_writer}; use quartz_nbt::{ io::{read_nbt, write_nbt, Flavor}, serde::{deserialize_from, deserialize_from_buffer, serialize_into_unchecked}, }; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs::File, io::{self, Cursor, Read, Seek, SeekFrom}, time::Duration, }; fn inflate(buf: &[u8]) -> Vec<u8> { let mut decoder = GzDecoder::new(buf); let mut dest = Vec::new(); decoder.read_to_end(&mut dest).unwrap(); dest } fn hematite_bench<T>(filename: &str, c: &mut Criterion) where T: DeserializeOwned + Serialize { let mut file = File::open(filename).unwrap(); let mut contents = Vec::new(); file.read_to_end(&mut contents).unwrap(); let mut src = Cursor::new(&contents[..]); file.seek(SeekFrom::Start(0)).unwrap(); let nbt_struct: T = from_gzip_reader(&mut file).unwrap(); file.seek(SeekFrom::Start(0)).unwrap(); let nbt_blob = nbt::Blob::from_gzip_reader(&mut file).unwrap(); let uncompressed = inflate(&contents); let mut uncompressed_src = Cursor::new(&uncompressed[..]); let mut group = c.benchmark_group(filename); group.sampling_mode(SamplingMode::Flat); group.throughput(Throughput::Bytes(contents.len() as u64)); group.bench_function("Hematite: Deserialize As Struct (Compressed)", |b| { b.iter(|| { src.seek(SeekFrom::Start(0)).unwrap(); let _: T = from_gzip_reader(&mut src).unwrap(); }) }); group.bench_function("Hematite: Deserialize As Struct (Uncompressed)", |b| { b.iter(|| { uncompressed_src.seek(SeekFrom::Start(0)).unwrap(); let _: T = from_reader(&mut 
uncompressed_src).unwrap(); }) }); group.bench_function("Hematite: Deserialize As Blob (Compressed)", |b| { b.iter(|| { src.seek(SeekFrom::Start(0)).unwrap(); nbt::Blob::from_gzip_reader(&mut src).unwrap(); }) }); group.bench_function("Hematite: Deserialize As Blob (Uncompressed)", |b| { b.iter(|| { uncompressed_src.seek(SeekFrom::Start(0)).unwrap(); nbt::Blob::from_reader(&mut uncompressed_src).unwrap(); }) }); group.bench_function("Hematite: Serialize As Struct", |b| { b.iter(|| { to_writer(&mut io::sink(), &nbt_struct, None).unwrap(); }) }); group.bench_function("Hematite: Serialize As Blob", |b| { b.iter(|| { nbt_blob.to_writer(&mut io::sink()).unwrap(); }) }); group.finish(); } fn fastnbt_bench<T>(filename: &str, c: &mut Criterion) where T: DeserializeOwned + Serialize { let mut file = File::open(filename).unwrap(); let mut contents = Vec::new(); file.read_to_end(&mut contents).unwrap(); let uncompressed = inflate(&contents); let mut group = c.benchmark_group(filename); group.sampling_mode(SamplingMode::Flat); group.throughput(Throughput::Bytes(contents.len() as u64)); group.bench_function("Fastnbt: Deserialize As Struct (Uncompressed)", |b| { b.iter(|| { black_box(from_bytes::<T>(&uncompressed).unwrap()); }) }); group.bench_function("Fastnbt: Deserialize As Compound (Uncompressed)", |b| { b.iter(|| { black_box( Parser::new(Cursor::new(&uncompressed[..])) .next() .unwrap() ); }) }); group.finish(); } fn quartz_bench<T>(filename: &str, c: &mut Criterion) where T: DeserializeOwned + Serialize { let mut file = File::open(filename).unwrap(); let mut contents = Vec::new(); file.read_to_end(&mut contents).unwrap(); let mut src = std::io::Cursor::new(&contents[..]); file.seek(SeekFrom::Start(0)).unwrap(); let nbt_struct: T = deserialize_from(&mut file, Flavor::GzCompressed).unwrap().0; file.seek(SeekFrom::Start(0)).unwrap(); let nbt_compound = read_nbt(&mut file, Flavor::GzCompressed).unwrap().0; let uncompressed = inflate(&contents); let mut uncompressed_src = 
Cursor::new(&uncompressed[..]); let mut group = c.benchmark_group(filename); group.sampling_mode(SamplingMode::Flat); group.throughput(Throughput::Bytes(contents.len() as u64)); group.bench_function("Quartz: Deserialize As Struct (Compressed)", |b| { b.iter(|| { src.seek(SeekFrom::Start(0)).unwrap(); black_box( deserialize_from::<_, T>(&mut src, Flavor::GzCompressed) .unwrap() .0, ); }) }); group.bench_function("Quartz: Deserialize As Struct (Uncompressed)", |b| { b.iter(|| { black_box(deserialize_from_buffer::<T>(&uncompressed).unwrap().0); }) }); group.bench_function("Quartz: Deserialize As Compound (Compressed)", |b| { b.iter(|| { src.seek(SeekFrom::Start(0)).unwrap(); black_box(read_nbt(&mut src, Flavor::GzCompressed).unwrap().0); }) }); group.bench_function("Quartz: Deserialize As Compound (Uncompressed)", |b| { b.iter(|| { uncompressed_src.seek(SeekFrom::Start(0)).unwrap(); black_box( read_nbt(&mut uncompressed_src, Flavor::Uncompressed) .unwrap() .0, ); }) }); group.bench_function("Quartz: Serialize As Struct", |b| { b.iter(|| { serialize_into_unchecked(&mut io::sink(), &nbt_struct, None, Flavor::Uncompressed) .unwrap(); }) }); group.bench_function("Quartz: Serialize As Compound", |b| { b.iter(|| { write_nbt(&mut io::sink(), None, &nbt_compound, Flavor::Uncompressed).unwrap(); }) }); group.finish(); } fn bench(c: &mut Criterion) { // hematite_bench::<data::Big1>("benches/assets/big1.nbt", c); fastnbt_bench::<data::Big1::<ByteArray, LongArray>>("benches/assets/big1.nbt", c); quartz_bench::<data::Big1>("benches/assets/big1.nbt", c); // hematite_bench::<data::PlayerData>("benches/assets/simple_player.dat", c); fastnbt_bench::<data::PlayerData>("benches/assets/simple_player.dat", c); quartz_bench::<data::PlayerData>("benches/assets/simple_player.dat", c); // hematite_bench::<data::PlayerData>("benches/assets/complex_player.dat", c); fastnbt_bench::<data::PlayerData>("benches/assets/complex_player.dat", c); 
quartz_bench::<data::PlayerData>("benches/assets/complex_player.dat", c); // hematite_bench::<data::Level>("benches/assets/level.dat", c); fastnbt_bench::<data::Level>("benches/assets/level.dat", c); quartz_bench::<data::Level>("benches/assets/level.dat", c); } criterion_group! { name = benches; config = Criterion::default() .sample_size(500) .warm_up_time(Duration::from_secs(1)); targets = bench } criterion_main!(benches); mod data { use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Deserialize)] pub struct Small1 { name: String } #[derive(Debug, PartialEq, Deserialize)] pub struct Small2Sub { #[serde(rename = "1")] one: i8, #[serde(rename = "2")] two: i16, #[serde(rename = "3")] three: i32, } #[derive(Debug, PartialEq, Deserialize)] pub struct Small2 { aaa: Small2Sub, bbb: Small2Sub, } #[derive(Debug, PartialEq, Deserialize)] pub struct Small3Sub { ccc: i32, name: String, } #[derive(Debug, PartialEq, Deserialize)] pub struct Small3 { bbb: Vec<Small3Sub>, } #[derive(Debug, PartialEq, Deserialize)] pub struct Small4Sub { aaa: i8, bbb: i8, ccc: i8, ddd: i8, } #[derive(Debug, PartialEq, Deserialize)] pub struct Small4 { c1: Small4Sub, c2: Small4Sub, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Big1Sub1 { name: String, #[serde(rename = "created-on")] created_on: i64, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Big1Sub2 { name: String, value: f32, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Big1Sub3 { ham: Big1Sub2, egg: Big1Sub2, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Big1<I = Vec<i8>, L = Vec<i64>> { #[serde(rename = "listTest (compound)")] list_test_compound: Vec<Big1Sub1>, #[serde(rename = "longTest")] long_test: i64, #[serde(rename = "shortTest")] short_test: i32, #[serde(rename = "byteTest")] byte_test: i8, #[serde(rename = "floatTest")] float_test: f64, #[serde(rename = "nested compound test")] nested_compound_test: Big1Sub3, #[serde(rename = "byteArrayTest (the 
first 1000 values of (n*n*255+n*7)%100, starting with n=0 (0, 62, 34, 16, 8, ...))")] byte_array_test: I, // [i8; 1000] does not implement PartialEq. #[serde(rename = "stringTest")] string_test: String, #[serde(rename = "listTest (long)")] list_test_long: L, #[serde(rename = "doubleTest")] double_test: f64, #[serde(rename = "intTest")] int_test: i32, } #[derive(Debug, Serialize, Deserialize)] pub struct Level { #[serde(rename = "Data")] pub data: LevelData } #[derive(Debug, Serialize, Deserialize)] pub struct LevelData { #[serde(rename = "RandomSeed")] seed: i64, #[serde(rename = "DayTime")] daytime: i64, #[serde(rename = "Player")] player: PlayerData, initialized: bool, version: i32, #[serde(rename = "allowCommands")] allow_commands: bool, #[serde(rename = "LastPlayed")] last_played: i64, #[serde(rename = "SpawnZ")] spawn_z: i32, #[serde(rename = "SpawnX")] spawn_x: i32, #[serde(rename = "SpawnY")] spawn_y: i32, #[serde(rename = "LevelName")] name: String, #[serde(rename = "MapFeatures")] map_features: bool, #[serde(rename = "GameType")] game_type: i32, #[serde(rename = "Difficulty")] difficulty: i8, #[serde(rename = "DifficultyLocked")] difficulty_locked: bool, #[serde(rename = "generatorName")] generator_name: String, #[serde(rename = "generatorOptions")] generator_options: String, #[serde(rename = "generatorVersion")] generator_version: i32, #[serde(rename = "Time")] time: i64, #[serde(rename = "clearWeatherTime")] clear_weather_time: i32, #[serde(rename = "thunderTime")] thunder_time: i32, #[serde(rename = "rainTime")] rain_time: i32, thundering: bool, raining: bool, hardcore: bool, #[serde(rename = "GameRules")] game_rules: GameRules, #[serde(rename = "SizeOnDisk")] size_on_disk: i64, #[serde(rename = "BorderCenterX")] border_center_x: f64, #[serde(rename = "BorderCenterY")] border_center_y: Option<f64>, #[serde(rename = "BorderCenterZ")] border_center_z: f64, #[serde(rename = "BorderWarningBlocks")] border_warning_blocks: f64, #[serde(rename = 
"BorderWarningTime")] border_warning_time: f64, #[serde(rename = "BorderSafeZone")] border_safe_zone: f64, #[serde(rename = "BorderSize")] border_size: f64, #[serde(rename = "BorderSizeLerpTarget")] border_size_lerp_target: f64, #[serde(rename = "BorderSizeLerpTime")] border_size_lerp_time: i64, #[serde(rename = "BorderDamagePerBlock")] border_damage_per_block: f64, } #[derive(Debug, Serialize, Deserialize)] pub struct PlayerData { #[serde(rename = "PersistentId")] persistant_id: Option<i32>, #[serde(rename = "playerGameType")] game_type: i32, abilities: PlayerAbilityData, #[serde(rename = "Score")] score: Option<i32>, #[serde(rename = "Dimension")] dimension: i32, #[serde(rename = "OnGround")] on_ground: bool, #[serde(rename = "FallDistance")] fall_distance: f32, #[serde(rename = "Motion")] motion: Vec<f64>, // [f64; 3] #[serde(rename = "Pos")] position: Vec<f64>, // [f64; 3] #[serde(rename = "Rotation")] rotation: Vec<f32>, // [f32; 2] #[serde(rename = "SpawnX")] spawn_x: i32, #[serde(rename = "SpawnY")] spawn_y: i32, #[serde(rename = "SpawnZ")] spawn_z: i32, #[serde(rename = "SpawnForced")] spawn_forced: Option<bool>, #[serde(rename = "PortalCooldown")] portal_cooldown: Option<i32>, #[serde(rename = "Invulnerable")] invulnerable: Option<bool>, #[serde(rename = "AttackTime")] attack_time: Option<i16>, #[serde(rename = "HurtTime")] hurt_time: i16, #[serde(rename = "HurtByTimestamp")] hurt_by: Option<i32>, #[serde(rename = "DeathTime")] death_time: i16, #[serde(rename = "Sleeping")] sleeping: bool, #[serde(rename = "SleepTimer")] sleep_timer: i16, #[serde(rename = "Health")] health: i16, #[serde(rename = "HealF")] heal: Option<f32>, #[serde(rename = "foodLevel")] food_level: i32, #[serde(rename = "foodTickTimer")] food_tick_timer: i32, #[serde(rename = "foodSaturationLevel")] food_saturation_level: f32, #[serde(rename = "foodExhaustionLevel")] food_exhaustion_level: f32, #[serde(rename = "Fire")] fire: i16, #[serde(rename = "Air")] air: i16, #[serde(rename = 
"XpP")] xp_p: f32, #[serde(rename = "XpLevel")] xp_level: i32, #[serde(rename = "XpTotal")] xp_total: i32, #[serde(rename = "XpSeed")] xp_seed: Option<i32>, #[serde(rename = "Inventory")] inventory: Vec<InventoryEntry>, #[serde(rename = "EnderItems")] ender_items: Vec<i8>, #[serde(rename = "SelectedItemSlot")] selected_item_slot: Option<i32>, #[serde(rename = "SelectedItem")] selected_item: Option<InventoryEntry>, #[serde(rename = "UUIDLeast")] uuid_least: Option<i64>, #[serde(rename = "UUIDMost")] uuid_most: Option<i64>, #[serde(rename = "AbsorptionAmount")] absorbtion_amount: Option<f32>, #[serde(rename = "Attributes")] attributes: Option<Vec<AttributeEntry>>, #[serde(rename = "ActiveEffects")] active_effects: Option<Vec<ActiveEffect>>, } #[derive(Debug, Serialize, Deserialize)] pub struct PlayerAbilityData { invulnerable: bool, instabuild: bool, flying: bool, #[serde(rename = "flySpeed")] fly_speed: f32, #[serde(rename = "walkSpeed")] walk_speed: f32, #[serde(rename = "mayBuild")] may_build: bool, #[serde(rename = "mayfly")] may_fly: bool, } #[derive(Debug, Serialize, Deserialize)] pub struct InventoryEntry { id: String, #[serde(rename = "Slot")] slot: Option<i8>, #[serde(rename = "Count")] count: i8, #[serde(rename = "Damage")] damage: i16, #[serde(rename = "tag")] info: Option<InventoryEntryInfo>, } #[derive(Debug, Serialize, Deserialize)] pub struct InventoryEntryInfo { display: Option<InventoryEntryDisplay>, #[serde(rename = "RepairCost")] repair_cost: Option<i32>, #[serde(rename = "ench")] enchantments: Vec<Enchantment>, } #[derive(Debug, Serialize, Deserialize)] pub struct InventoryEntryDisplay { #[serde(rename = "Name")] name: String, } #[derive(Debug, Serialize, Deserialize)] pub struct Enchantment { id: i16, #[serde(rename = "lvl")] level: i16, } #[derive(Debug, Serialize, Deserialize)] pub struct EnderItemsEntry { id: String, } #[derive(Debug, Serialize, Deserialize)] pub struct AttributeEntry { #[serde(rename = "Name")] name: String, #[serde(rename = 
"Base")] base: f64, #[serde(rename = "Modifiers")] modifiers: Option<Vec<AttributeModifier>>, } #[derive(Debug, Serialize, Deserialize)] pub struct AttributeModifier { #[serde(rename = "Name")] name: String, #[serde(rename = "Amount")] amount: f64, #[serde(rename = "Operation")] operation: i32, #[serde(rename = "UUIDLeast")] uuid_least: i64, #[serde(rename = "UUIDMost")] uuid_most: i64, } #[derive(Debug, Serialize, Deserialize)] pub struct ActiveEffect { #[serde(rename = "Id")] id: i8, #[serde(rename = "Duration")] base: i32, #[serde(rename = "Ambient")] ambient: bool, #[serde(rename = "Amplifier")] amplifier: bool, #[serde(rename = "ShowParticles")] show_particles: bool, } #[derive(Debug, Serialize, Deserialize)] pub struct GameRules { #[serde(rename = "doMobLoot")] mob_loot: String, #[serde(rename = "doTileDrops")] tile_drops: String, #[serde(rename = "doFireTick")] fire_tick: String, #[serde(rename = "mobGriefing")] mob_griefing: String, #[serde(rename = "commandBlockOutput")] command_block_output: String, #[serde(rename = "doMobSpawning")] mob_spawning: String, #[serde(rename = "keepInventory")] keep_inventory: String, #[serde(rename = "showDeathMessages")] show_death_messages: String, #[serde(rename = "doEntityDrops")] entity_drops: String, #[serde(rename = "naturalRegeneration")] natural_regeneration: String, #[serde(rename = "logAdminCommands")] log_admin_commands: String, #[serde(rename = "doDaylightCycle")] daylight_cycle: String, #[serde(rename = "sendCommandFeedback")] send_command_feedback: String, #[serde(rename = "randomTickSpeed")] random_tick_speed: String, #[serde(rename = "reducedDebugInfo")] reduced_debug_info: String, } }
38.386555
130
0.607761
c1eeb523bb904292b69b6c1d458cb70ecbd045a2
4,470
#![deny(warnings)] use sauron::prelude::*; use std::{cell::RefCell, rc::Rc}; use test_fixtures::simple_program; use wasm_bindgen_test::*; use web_sys::InputEvent; mod test_fixtures; wasm_bindgen_test_configure!(run_in_browser); // Verify that our DomUpdater's patch method works. // We test a simple case here, since diff_patch.rs is responsible for testing more complex // diffing and patching. #[wasm_bindgen_test] fn patches_dom() { console_error_panic_hook::set_once(); let document = web_sys::window().unwrap().document().unwrap(); let vdom: Node<()> = div(vec![], vec![]); let simple_program = simple_program(); let mut dom_updater = DomUpdater::new_append_to_mount( &simple_program, vdom, &sauron_core::body(), ); let new_vdom = div(vec![id("patched")], vec![]); //html! { <div id="patched"></div> }; dom_updater.update_dom(&simple_program, new_vdom); assert_eq!(document.query_selector("#patched").unwrap().is_some(), true); } // When you replace a DOM node with another DOM node we need to make sure that the closures // from the new DOM node are stored by the DomUpdater otherwise they'll get dropped and // won't work. #[wasm_bindgen_test] fn updates_active_closure_on_replace() { console_error_panic_hook::set_once(); let body = sauron_core::body(); let simple_program = simple_program(); let old = div(vec![], vec![]); let mut dom_updater = DomUpdater::new_append_to_mount(&simple_program, old, &body); let text = Rc::new(RefCell::new("Start Text".to_string())); let text_clone = Rc::clone(&text); let elem_id = "update-active-closures-on-replace"; let replace_node = input( vec![ id(elem_id), on_input(move |event: sauron_core::html::events::InputEvent| { *text_clone.borrow_mut() = event.value.to_string(); }), value("End Text"), ], vec![], ); // New node replaces old node. // We are testing that we've stored this new node's closures even though `new` will be dropped // at the end of this block. 
dom_updater.update_dom(&simple_program, replace_node); let input_event = InputEvent::new("input").unwrap(); assert_eq!(&*text.borrow(), "Start Text"); // After dispatching the on_input event our `text` should have a value of the input elements value. let input = sauron_core::document().get_element_by_id(&elem_id).unwrap(); web_sys::EventTarget::from(input) .dispatch_event(&input_event) .unwrap(); assert_eq!(&*text.borrow(), "End Text"); } // When you replace a DOM node with another DOM node we need to make sure that the closures // from the new DOM node are stored by the DomUpdater otherwise they'll get dropped and // won't work. #[wasm_bindgen_test] fn updates_active_closures_on_append() { console_error_panic_hook::set_once(); let body = sauron_core::body(); let old = div(vec![], vec![]); let simple_program = simple_program(); let mut dom_updater = DomUpdater::new_append_to_mount(&simple_program, old, &body); let text = Rc::new(RefCell::new("Start Text".to_string())); let text_clone = Rc::clone(&text); let elem_id = "update-active-closures-on-append"; { let append_node = div( vec![], vec![input( vec![ id(elem_id), on_input( move |event: sauron_core::html::events::InputEvent| { *text_clone.borrow_mut() = event.value.to_string(); }, ), value("End Text"), ], vec![], )], ); // New node gets appended into the DOM. // We are testing that we've stored this new node's closures even though `new` will be dropped // at the end of this block. dom_updater.update_dom(&simple_program, append_node); } let input_event = InputEvent::new("input").unwrap(); assert_eq!(&*text.borrow(), "Start Text"); // After dispatching the on_input event our `text` should have a value of the input elements value. let input = sauron_core::document().get_element_by_id(elem_id).unwrap(); web_sys::EventTarget::from(input) .dispatch_event(&input_event) .unwrap(); assert_eq!(&*text.borrow(), "End Text"); }
32.627737
103
0.626174
0802f16f45cdab27ab921b4458f7b08f4caceaff
4,079
use std::ffi::c_void; use std::{io, ptr}; use widestring::U16CString; use winapi::shared::windef::RECT; use winapi::shared::winerror::E_FAIL; use winapi::shared::wtypesbase::CLSCTX_LOCAL_SERVER; use winapi::um::combaseapi::{CoCreateInstance, CoTaskMemFree, CoUninitialize}; use winapi::um::objbase::CoInitialize; use winapi::um::shobjidl_core::{CLSID_DesktopWallpaper, IDesktopWallpaper, DWPOS_CENTER}; use winapi::Interface; use crate::directories::ids::WallpaperID; #[derive(Debug, Clone)] pub struct Monitor { pub width: u32, pub height: u32, pub path: U16CString, } // Just return an empty monitors list rather than panicking fn check(result: i32) -> Result<(), io::Error> { if result == 0 { Ok(()) } else { let e = io::Error::from_raw_os_error(result); println!("{:?}", io::Error::from_raw_os_error(result)); Err(e) } } unsafe fn get_monitor(dtop: &IDesktopWallpaper, n: u32) -> Result<Option<Monitor>, io::Error> { let mut monitor_id: *mut u16 = ptr::null_mut(); check(dtop.GetMonitorDevicePathAt(n, &mut monitor_id))?; assert!(!monitor_id.is_null()); let path = U16CString::from_ptr_str(monitor_id); // We can free the memory Windows allocated immediately. CoTaskMemFree(monitor_id as *mut c_void); let mut rect = RECT { left: 0, top: 0, right: 0, bottom: 0, }; let result = dtop.GetMonitorRECT(path.as_ptr(), &mut rect); if result != E_FAIL { check(result)?; } else { // Sometimes windows hallucinates monitors. Hell if I know. Ignore E_FAIL failures here. return Ok(None); } Ok(Some(Monitor { width: (rect.right - rect.left) as u32, height: (rect.bottom - rect.top) as u32, path, })) } // In error cases this can leak but we'll be closing the program anyway. 
pub fn list() -> Vec<Monitor> { let monitors: Result<_, io::Error> = (|| unsafe { check(CoInitialize(ptr::null_mut()))?; let mut monitors = Vec::new(); let mut desktop: *mut IDesktopWallpaper = ptr::null_mut(); check(CoCreateInstance( &CLSID_DesktopWallpaper, ptr::null_mut(), CLSCTX_LOCAL_SERVER, &IDesktopWallpaper::uuidof(), &mut desktop as *mut *mut IDesktopWallpaper as *mut *mut c_void, ))?; let dtop = desktop.as_ref().unwrap(); let mut monitor_count: u32 = 0; check(dtop.GetMonitorDevicePathCount(&mut monitor_count as *mut u32))?; for n in 0..monitor_count { if let Some(m) = get_monitor(dtop, n)? { monitors.push(m); } } dtop.Release(); CoUninitialize(); Ok(monitors) })(); match monitors { Ok(v) => v, Err(_) => Vec::new(), } } pub fn set_wallpapers(wallpapers: &[(&impl WallpaperID, &[Monitor])], _temp: bool) { // TODO -- maybe set legacy registry keys. Likely useless but I want to be sure. let wallmons: Vec<_> = wallpapers .iter() .map(move |(wid, ms)| ms.iter().map(move |m| (wid, m))) .flatten() .collect(); let r: Result<_, io::Error> = (|| unsafe { check(CoInitialize(ptr::null_mut()))?; let mut desktop: *mut IDesktopWallpaper = ptr::null_mut(); check(CoCreateInstance( &CLSID_DesktopWallpaper, ptr::null_mut(), CLSCTX_LOCAL_SERVER, &IDesktopWallpaper::uuidof(), &mut desktop as *mut *mut IDesktopWallpaper as *mut *mut c_void, ))?; let dtop = desktop.as_ref().unwrap(); check(dtop.SetPosition(DWPOS_CENTER))?; for (wid, m) in wallmons { let u16_path = U16CString::from_os_str(wid.cached_abs_path(m, &wid.get_props(m))) .expect("Invalid wallpaper path containing null"); check(dtop.SetWallpaper(m.path.as_ptr(), u16_path.as_ptr()))?; } dtop.Release(); CoUninitialize(); Ok(()) })(); drop(r); }
28.725352
96
0.593038
0a59992cda1de54b5cc782390674bc4eed968791
5,736
//! Authentication errors that occur in the crate's source code must be handled. //! Due to rusts limited capabilites to register / overwrite code from other traits //! some helper functions are provided to customize the error responses. use std::collections::HashMap; use actix_web::dev::HttpResponseBuilder; use actix_web::http::{header, StatusCode}; use actix_web::{error, HttpResponse}; use once_cell::sync::Lazy; use crate::authentication::error::error_type::AuthenticationError; // Status code must be 100 <= code <= 1000 static AUTH_ERROR_STATUS_CODE_MAPPING: Lazy<HashMap<AuthenticationError, u16>> = Lazy::new(|| { let mut error_codes: HashMap<AuthenticationError, u16> = HashMap::new(); add_env_error_code(AuthenticationError::InvalidAuthentication, &mut error_codes); add_env_error_code(AuthenticationError::InvalidToken, &mut error_codes); add_env_error_code( AuthenticationError::InvalidAuthorizationHeader, &mut error_codes, ); add_env_error_code(AuthenticationError::UsernameNotFound, &mut error_codes); add_env_error_code( AuthenticationError::AuthorizationHeaderNotSet, &mut error_codes, ); error_codes }); static AUTH_ERROR_MESSAGE_MAPPING: Lazy<HashMap<AuthenticationError, String>> = Lazy::new(|| { let mut error_messages: HashMap<AuthenticationError, String> = HashMap::new(); add_env_error_message( AuthenticationError::InvalidAuthentication, "invalid authentication".to_string(), &mut error_messages, ); add_env_error_message( AuthenticationError::InvalidToken, "access denied".to_string(), &mut error_messages, ); add_env_error_message( AuthenticationError::InvalidAuthorizationHeader, "invalid authorization header".to_string(), &mut error_messages, ); add_env_error_message( AuthenticationError::UsernameNotFound, "access denied".to_string(), &mut error_messages, ); add_env_error_message( AuthenticationError::AuthorizationHeaderNotSet, "authorization header not set".to_string(), &mut error_messages, ); error_messages }); static AUTH_ERROR_CONTENT_TYPE: Lazy<String> = 
Lazy::new(|| match std::env::var("AUTH_ERROR_CONTENT_TYPE") { Ok(content_type) => content_type, _ => "text/html; charset=utf-8".to_string(), }); fn add_env_error_code( error: AuthenticationError, error_codes: &mut HashMap<AuthenticationError, u16>, ) { match std::env::var(format!("{}_code", error)) { Ok(code) => error_codes.insert( error, code.parse::<u16>().expect("Invalid status code mapping"), ), _ => error_codes.insert(error, 401), }; } fn add_env_error_message( error: AuthenticationError, default_message: String, error_messages: &mut HashMap<AuthenticationError, String>, ) { match std::env::var(format!("{}_message", error)) { Ok(message) => error_messages.insert(error, message), _ => error_messages.insert(error, default_message), }; } /// Errors have a predfined HTTP-Status code that is returned in case an error occurs. /// This status code can be overwritten by calling this function. /// The status code must be in the range: 100 <= code <= 1000 pub fn overwrite_auth_error_status_code(error: AuthenticationError, status_code: u16) { assert!((100..=1000).contains(&status_code), "Invalid status code"); std::env::set_var(format!("{}_code", error), status_code.to_string()); } /// Errors have a predfined text message that is returned in case an error occurs. /// This message can be overwritten by calling this function. pub fn overwrite_auth_error_message(error: AuthenticationError, message: String) { std::env::set_var(format!("{}_message", error), message); } /// Error responses return the content type header `text/html; charset=utf-8` by default. /// The header value can be overwritten by calling this function. 
pub fn set_auth_error_content_type(content_type: String) { std::env::set_var("AUTH_ERROR_CONTENT_TYPE", content_type); } impl error::ResponseError for AuthenticationError { fn status_code(&self) -> StatusCode { match *self { AuthenticationError::InvalidAuthentication => { dynamic_status_code(&AuthenticationError::InvalidAuthentication) } AuthenticationError::AuthorizationHeaderNotSet => { dynamic_status_code(&AuthenticationError::AuthorizationHeaderNotSet) } AuthenticationError::InvalidAuthorizationHeader => { dynamic_status_code(&AuthenticationError::InvalidAuthorizationHeader) } AuthenticationError::UsernameNotFound => { dynamic_status_code(&AuthenticationError::UsernameNotFound) } AuthenticationError::InvalidToken => { dynamic_status_code(&AuthenticationError::InvalidToken) } } } fn error_response(&self) -> HttpResponse { HttpResponseBuilder::new(self.status_code()) .set_header(header::CONTENT_TYPE, AUTH_ERROR_CONTENT_TYPE.to_string()) .body(dynamic_error_message(self)) } } fn dynamic_status_code(error: &AuthenticationError) -> StatusCode { StatusCode::from_u16( *AUTH_ERROR_STATUS_CODE_MAPPING .get(error) .expect("Status code mapping missing"), ) .expect("Invalid status code mapping found") } fn dynamic_error_message(error: &AuthenticationError) -> String { AUTH_ERROR_MESSAGE_MAPPING .get(error) .expect("Error message mapping missing") .clone() }
37.246753
95
0.690202
26d6c164198289390cd4eec060cb343cfef06bfa
3,080
use nu_protocol::{ ast::Call, engine::{Command, EngineState, Stack}, Category, Example, PipelineData, ShellError, Signature, SyntaxShape, }; #[derive(Clone)] pub struct Exec; impl Command for Exec { fn name(&self) -> &str { "exec" } fn signature(&self) -> Signature { Signature::build("exec") .required("command", SyntaxShape::String, "the command to execute") .rest( "rest", SyntaxShape::String, "any additional arguments for the command", ) .category(Category::System) } fn usage(&self) -> &str { "Execute a command, replacing the current process." } fn extra_usage(&self) -> &str { "Currently supported only on Unix-based systems." } fn run( &self, engine_state: &EngineState, stack: &mut Stack, call: &Call, _input: PipelineData, ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> { exec(engine_state, stack, call) } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Execute external 'ps aux' tool", example: "exec ps aux", result: None, }, Example { description: "Execute 'nautilus'", example: "exec nautilus", result: None, }, ] } } #[cfg(unix)] fn exec( engine_state: &EngineState, stack: &mut Stack, call: &Call, ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> { use std::os::unix::process::CommandExt; use nu_engine::{current_dir, env_to_strings, CallExt}; use nu_protocol::Spanned; use super::run_external::ExternalCommand; let name: Spanned<String> = call.req(engine_state, stack, 0)?; let name_span = name.span; let args: Vec<Spanned<String>> = call.rest(engine_state, stack, 1)?; let cwd = current_dir(engine_state, stack)?; let config = stack.get_config()?; let env_vars = env_to_strings(engine_state, stack, &config)?; let current_dir = current_dir(engine_state, stack)?; let external_command = ExternalCommand { name, args, env_vars, redirect_stdout: true, redirect_stderr: false, }; let mut command = external_command.spawn_simple_command(&cwd.to_string_lossy().to_string())?; command.current_dir(current_dir); println!("{:#?}", 
command); let err = command.exec(); // this replaces our process, should not return Err(ShellError::SpannedLabeledError( "Error on exec".to_string(), err.to_string(), name_span, )) } #[cfg(not(unix))] fn exec( _engine_state: &EngineState, _stack: &mut Stack, call: &Call, ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> { Err(ShellError::SpannedLabeledError( "Error on exec".to_string(), "exec is not supported on your platform".to_string(), call.head, )) }
26.551724
97
0.586039
388e09da01bb033b8d20fed9df51ec5be4973cb5
605
// TODO // Tests // Examples // Comment code // Doc // Two times controlled gates ? // Benches ? // Constify ? // Publish ?? // cargo doc --no-deps --open extern crate ocl; // Modules mod complex; mod computer; mod gates; mod measure; mod program; mod random; const MEASUREMENTS_BLOCK: usize = 1024; #[inline] pub(crate) fn approx_eq(x: f32, y: f32) -> bool { (x - y).abs() < f32::EPSILON } // Exports pub use complex::c64; pub use computer::{Address, Computer, ComputerBuilder}; pub use gates::Gate; pub use measure::Measurements; pub use program::{InstructionChain, Program, ProgramBuilder};
16.351351
61
0.684298
2863f0b6f8eb2f9e08cee5252bd27166863e6b32
4,628
// NOTE(review): svd2rust-style generated register accessors for
// OSC_CONFIG2_TOG. The generated code is left byte-identical; this header
// comment is the only addition. Fields: COUNT_1M_TRG (bits 0:11),
// ENABLE_1M (bit 16), MUX_1M (bit 17), CLK_1M_ERR_FL (bit 31).
#[doc = "Reader of register OSC_CONFIG2_TOG"]
pub type R = crate::R<u32, super::OSC_CONFIG2_TOG>;
#[doc = "Writer for register OSC_CONFIG2_TOG"]
pub type W = crate::W<u32, super::OSC_CONFIG2_TOG>;
#[doc = "Register OSC_CONFIG2_TOG `reset()`'s with value 0x0001_02e2"]
impl crate::ResetValue for super::OSC_CONFIG2_TOG {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x0001_02e2
    }
}
#[doc = "Reader of field `COUNT_1M_TRG`"]
pub type COUNT_1M_TRG_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `COUNT_1M_TRG`"]
pub struct COUNT_1M_TRG_W<'a> {
    w: &'a mut W,
}
impl<'a> COUNT_1M_TRG_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x0fff) | ((value as u32) & 0x0fff);
        self.w
    }
}
#[doc = "Reader of field `ENABLE_1M`"]
pub type ENABLE_1M_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ENABLE_1M`"]
pub struct ENABLE_1M_W<'a> {
    w: &'a mut W,
}
impl<'a> ENABLE_1M_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
#[doc = "Reader of field `MUX_1M`"]
pub type MUX_1M_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `MUX_1M`"]
pub struct MUX_1M_W<'a> {
    w: &'a mut W,
}
impl<'a> MUX_1M_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
        self.w
    }
}
#[doc = "Reader of field `CLK_1M_ERR_FL`"]
pub type CLK_1M_ERR_FL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CLK_1M_ERR_FL`"]
pub struct CLK_1M_ERR_FL_W<'a> {
    w: &'a mut W,
}
impl<'a> CLK_1M_ERR_FL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:11 - The target count used to tune the 1MHz clock frequency"]
    #[inline(always)]
    pub fn count_1m_trg(&self) -> COUNT_1M_TRG_R {
        COUNT_1M_TRG_R::new((self.bits & 0x0fff) as u16)
    }
    #[doc = "Bit 16 - Enable the 1MHz clock output. 0 - disabled; 1 - enabled."]
    #[inline(always)]
    pub fn enable_1m(&self) -> ENABLE_1M_R {
        ENABLE_1M_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bit 17 - Mux the corrected or uncorrected 1MHz clock to the output"]
    #[inline(always)]
    pub fn mux_1m(&self) -> MUX_1M_R {
        MUX_1M_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 31 - Flag indicates that the count_1m count wasn't reached within 1 32kHz period"]
    #[inline(always)]
    pub fn clk_1m_err_fl(&self) -> CLK_1M_ERR_FL_R {
        CLK_1M_ERR_FL_R::new(((self.bits >> 31) & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bits 0:11 - The target count used to tune the 1MHz clock frequency"]
    #[inline(always)]
    pub fn count_1m_trg(&mut self) -> COUNT_1M_TRG_W {
        COUNT_1M_TRG_W { w: self }
    }
    #[doc = "Bit 16 - Enable the 1MHz clock output. 0 - disabled; 1 - enabled."]
    #[inline(always)]
    pub fn enable_1m(&mut self) -> ENABLE_1M_W {
        ENABLE_1M_W { w: self }
    }
    #[doc = "Bit 17 - Mux the corrected or uncorrected 1MHz clock to the output"]
    #[inline(always)]
    pub fn mux_1m(&mut self) -> MUX_1M_W {
        MUX_1M_W { w: self }
    }
    #[doc = "Bit 31 - Flag indicates that the count_1m count wasn't reached within 1 32kHz period"]
    #[inline(always)]
    pub fn clk_1m_err_fl(&mut self) -> CLK_1M_ERR_FL_W {
        CLK_1M_ERR_FL_W { w: self }
    }
}
32.363636
99
0.581893
71ebf10979f1d87d7c5a8a594786f650ae0b787b
3,440
use crate::prelude::*;
use arrow::array::DictionaryArray;
use arrow::compute::cast::cast;

mod builder;
mod merge;

pub use builder::*;

// Convert a categorical column into an Arrow `DictionaryArray<u32>`.
//
// The column is rechunked first so that `downcast_iter().next()` yields a
// single chunk holding all keys. The dictionary values come from the
// column's `categorical_map` (`RevMapping`):
// - `Local`: the local value array is used directly with the keys as-is.
// - `Global`: each local key is translated through `reverse_map` into an
//   index of the global value array.
impl From<&CategoricalChunked> for DictionaryArray<u32> {
    fn from(ca: &CategoricalChunked) -> Self {
        let ca = ca.rechunk();
        let keys = ca.downcast_iter().next().unwrap();
        let map = &**ca.categorical_map.as_ref().unwrap();
        match map {
            RevMapping::Local(arr) => {
                DictionaryArray::from_data(keys.clone(), Arc::new(arr.clone()))
            }
            RevMapping::Global(reverse_map, values, _uuid) => {
                let iter = keys
                    .into_iter()
                    .map(|opt_k| opt_k.map(|k| *reverse_map.get(k).unwrap()));
                let keys = PrimitiveArray::from_trusted_len_iter(iter);
                DictionaryArray::from_data(keys, Arc::new(values.clone()))
            }
        }
    }
}

// Same conversion as above but producing `i64` keys (e.g. for consumers that
// require 64-bit dictionary indices). In the `Local` branch the existing key
// array is widened via an arrow cast; in the `Global` branch the remapped
// keys are widened with `as i64` while building the new key array.
impl From<&CategoricalChunked> for DictionaryArray<i64> {
    fn from(ca: &CategoricalChunked) -> Self {
        let ca = ca.rechunk();
        let keys = ca.downcast_iter().next().unwrap();
        let map = &**ca.categorical_map.as_ref().unwrap();
        match map {
            RevMapping::Local(arr) => DictionaryArray::from_data(
                cast(keys, &ArrowDataType::Int64)
                    .unwrap()
                    .as_any()
                    .downcast_ref::<PrimitiveArray<i64>>()
                    .unwrap()
                    .clone(),
                Arc::new(arr.clone()),
            ),
            RevMapping::Global(reverse_map, values, _uuid) => {
                let iter = keys
                    .into_iter()
                    .map(|opt_k| opt_k.map(|k| *reverse_map.get(k).unwrap() as i64));
                let keys = PrimitiveArray::from_trusted_len_iter(iter);
                DictionaryArray::from_data(keys, Arc::new(values.clone()))
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::{reset_string_cache, toggle_string_cache, SINGLE_LOCK};
    use std::convert::TryFrom;

    // Utf8 -> categorical -> arrow dictionary -> Series; nulls and length
    // must survive the round trip.
    #[test]
    fn test_categorical_round_trip() -> Result<()> {
        // The global string cache is process-wide state, hence the lock.
        let _lock = SINGLE_LOCK.lock();
        reset_string_cache();
        let slice = &[
            Some("foo"),
            None,
            Some("bar"),
            Some("foo"),
            Some("foo"),
            Some("bar"),
        ];
        let ca = Utf8Chunked::new_from_opt_slice("a", slice);
        let ca = ca.cast::<CategoricalType>()?;
        let arr: DictionaryArray<u32> = (&ca).into();
        let s = Series::try_from(("foo", Arc::new(arr) as ArrayRef))?;
        assert_eq!(s.dtype(), &DataType::Categorical);
        assert_eq!(s.null_count(), 1);
        assert_eq!(s.len(), 6);
        Ok(())
    }

    // Appending categoricals from two columns must keep categories distinct
    // and correctly mapped (requires the string cache to be enabled).
    #[test]
    fn test_append_categorical() {
        let _lock = SINGLE_LOCK.lock();
        reset_string_cache();
        toggle_string_cache(true);
        let mut s1 = Series::new("1", vec!["a", "b", "c"])
            .cast::<CategoricalType>()
            .unwrap();
        let s2 = Series::new("2", vec!["a", "x", "y"])
            .cast::<CategoricalType>()
            .unwrap();
        let appended = s1.append(&s2).unwrap();
        assert_eq!(appended.str_value(0), "\"a\"");
        assert_eq!(appended.str_value(1), "\"b\"");
        assert_eq!(appended.str_value(4), "\"x\"");
        assert_eq!(appended.str_value(5), "\"y\"");
    }
}
32.761905
85
0.513372
eb0a9d335367e5d766a3be5e7e058e05727540d1
992
//! Creates a new BAM file. //! //! This writes a SAM header, reference sequences, and one unmapped record to stdout. //! //! Verify the output by piping to `samtools view --no-PG --with-header`. use noodles_bam as bam; use noodles_sam::{ self as sam, header::{Program, ReferenceSequence}, }; use tokio::io; #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { let mut writer = bam::AsyncWriter::new(io::stdout()); let header = sam::Header::builder() .set_header(Default::default()) .add_reference_sequence(ReferenceSequence::new("sq0", 8)?) .add_program(Program::new("noodles-bam")) .add_comment("an example BAM written by noodles-bam") .build(); writer.write_header(&header).await?; writer .write_reference_sequences(header.reference_sequences()) .await?; let record = bam::Record::default(); writer.write_record(&record).await?; writer.shutdown().await?; Ok(()) }
26.810811
85
0.642137
d7ad7279189751bcfc34d78af0dc8fd3f2bb052a
26,056
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
// NOTE(review): machine-generated GTK bindings; code is left byte-identical.
// This comment is the only addition and will vanish on regeneration.

use crate::TextBuffer;
use crate::TextChildAnchor;
use crate::TextMark;
use crate::TextSearchFlags;
use crate::TextTag;
use glib::object::IsA;
use glib::translate::*;
use std::cmp;

glib::wrapper! {
    #[derive(Debug, Hash)]
    pub struct TextIter(Boxed<ffi::GtkTextIter>);

    match fn {
        copy => |ptr| ffi::gtk_text_iter_copy(ptr),
        free => |ptr| ffi::gtk_text_iter_free(ptr),
        init => |_ptr| (),
        clear => |_ptr| (),
        get_type => || ffi::gtk_text_iter_get_type(),
    }
}

impl TextIter {
    #[doc(alias = "gtk_text_iter_assign")]
    pub fn assign(&mut self, other: &TextIter) {
        unsafe {
            ffi::gtk_text_iter_assign(self.to_glib_none_mut().0, other.to_glib_none().0);
        }
    }

    #[doc(alias = "gtk_text_iter_backward_char")]
    pub fn backward_char(&mut self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_backward_char(self.to_glib_none_mut().0)) }
    }

    #[doc(alias = "gtk_text_iter_backward_chars")]
    pub fn backward_chars(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_chars(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_cursor_position")]
    pub fn backward_cursor_position(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_cursor_position(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_cursor_positions")]
    pub fn backward_cursor_positions(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_cursor_positions(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_find_char")]
    pub fn backward_find_char<P: FnMut(char) -> bool>(
        &mut self,
        pred: P,
        limit: Option<&TextIter>,
    ) -> bool {
        let pred_data: P = pred;
        unsafe extern "C" fn pred_func<P: FnMut(char) -> bool>(
            ch: u32,
            user_data: glib::ffi::gpointer,
        ) -> glib::ffi::gboolean {
            let ch = std::convert::TryFrom::try_from(ch)
                .expect("conversion from an invalid Unicode value attempted");
            let callback: *mut P = user_data as *const _ as usize as *mut P;
            let res = (*callback)(ch);
            res.to_glib()
        }
        let pred = Some(pred_func::<P> as _);
        let super_callback0: &P = &pred_data;
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_find_char(
                self.to_glib_none_mut().0,
                pred,
                super_callback0 as *const _ as usize as *mut _,
                limit.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_line")]
    pub fn backward_line(&mut self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_backward_line(self.to_glib_none_mut().0)) }
    }

    #[doc(alias = "gtk_text_iter_backward_lines")]
    pub fn backward_lines(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_lines(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_search")]
    pub fn backward_search(
        &self,
        str: &str,
        flags: TextSearchFlags,
        limit: Option<&TextIter>,
    ) -> Option<(TextIter, TextIter)> {
        unsafe {
            let mut match_start = TextIter::uninitialized();
            let mut match_end = TextIter::uninitialized();
            let ret = from_glib(ffi::gtk_text_iter_backward_search(
                self.to_glib_none().0,
                str.to_glib_none().0,
                flags.to_glib(),
                match_start.to_glib_none_mut().0,
                match_end.to_glib_none_mut().0,
                limit.to_glib_none().0,
            ));
            if ret {
                Some((match_start, match_end))
            } else {
                None
            }
        }
    }

    #[doc(alias = "gtk_text_iter_backward_sentence_start")]
    pub fn backward_sentence_start(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_sentence_start(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_sentence_starts")]
    pub fn backward_sentence_starts(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_sentence_starts(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_to_tag_toggle")]
    pub fn backward_to_tag_toggle<P: IsA<TextTag>>(&mut self, tag: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_to_tag_toggle(
                self.to_glib_none_mut().0,
                tag.map(|p| p.as_ref()).to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_cursor_position")]
    pub fn backward_visible_cursor_position(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_cursor_position(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_cursor_positions")]
    pub fn backward_visible_cursor_positions(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_cursor_positions(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_line")]
    pub fn backward_visible_line(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_line(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_lines")]
    pub fn backward_visible_lines(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_lines(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_word_start")]
    pub fn backward_visible_word_start(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_word_start(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_visible_word_starts")]
    pub fn backward_visible_word_starts(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_visible_word_starts(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_word_start")]
    pub fn backward_word_start(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_word_start(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_backward_word_starts")]
    pub fn backward_word_starts(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_backward_word_starts(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_can_insert")]
    pub fn can_insert(&self, default_editability: bool) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_can_insert(
                self.to_glib_none().0,
                default_editability.to_glib(),
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_compare")]
    fn compare(&self, rhs: &TextIter) -> i32 {
        unsafe { ffi::gtk_text_iter_compare(self.to_glib_none().0, rhs.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_editable")]
    pub fn editable(&self, default_setting: bool) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_editable(
                self.to_glib_none().0,
                default_setting.to_glib(),
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_ends_line")]
    pub fn ends_line(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_ends_line(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_ends_sentence")]
    pub fn ends_sentence(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_ends_sentence(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_ends_tag")]
    pub fn ends_tag<P: IsA<TextTag>>(&self, tag: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_ends_tag(
                self.to_glib_none().0,
                tag.map(|p| p.as_ref()).to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_ends_word")]
    pub fn ends_word(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_ends_word(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_equal")]
    fn equal(&self, rhs: &TextIter) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_equal(
                self.to_glib_none().0,
                rhs.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_char")]
    pub fn forward_char(&mut self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_forward_char(self.to_glib_none_mut().0)) }
    }

    #[doc(alias = "gtk_text_iter_forward_chars")]
    pub fn forward_chars(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_chars(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_cursor_position")]
    pub fn forward_cursor_position(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_cursor_position(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_cursor_positions")]
    pub fn forward_cursor_positions(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_cursor_positions(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_find_char")]
    pub fn forward_find_char<P: FnMut(char) -> bool>(
        &mut self,
        pred: P,
        limit: Option<&TextIter>,
    ) -> bool {
        let pred_data: P = pred;
        unsafe extern "C" fn pred_func<P: FnMut(char) -> bool>(
            ch: u32,
            user_data: glib::ffi::gpointer,
        ) -> glib::ffi::gboolean {
            let ch = std::convert::TryFrom::try_from(ch)
                .expect("conversion from an invalid Unicode value attempted");
            let callback: *mut P = user_data as *const _ as usize as *mut P;
            let res = (*callback)(ch);
            res.to_glib()
        }
        let pred = Some(pred_func::<P> as _);
        let super_callback0: &P = &pred_data;
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_find_char(
                self.to_glib_none_mut().0,
                pred,
                super_callback0 as *const _ as usize as *mut _,
                limit.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_line")]
    pub fn forward_line(&mut self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_forward_line(self.to_glib_none_mut().0)) }
    }

    #[doc(alias = "gtk_text_iter_forward_lines")]
    pub fn forward_lines(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_lines(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_search")]
    pub fn forward_search(
        &self,
        str: &str,
        flags: TextSearchFlags,
        limit: Option<&TextIter>,
    ) -> Option<(TextIter, TextIter)> {
        unsafe {
            let mut match_start = TextIter::uninitialized();
            let mut match_end = TextIter::uninitialized();
            let ret = from_glib(ffi::gtk_text_iter_forward_search(
                self.to_glib_none().0,
                str.to_glib_none().0,
                flags.to_glib(),
                match_start.to_glib_none_mut().0,
                match_end.to_glib_none_mut().0,
                limit.to_glib_none().0,
            ));
            if ret {
                Some((match_start, match_end))
            } else {
                None
            }
        }
    }

    #[doc(alias = "gtk_text_iter_forward_sentence_end")]
    pub fn forward_sentence_end(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_sentence_end(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_sentence_ends")]
    pub fn forward_sentence_ends(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_sentence_ends(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_to_end")]
    pub fn forward_to_end(&mut self) {
        unsafe {
            ffi::gtk_text_iter_forward_to_end(self.to_glib_none_mut().0);
        }
    }

    #[doc(alias = "gtk_text_iter_forward_to_line_end")]
    pub fn forward_to_line_end(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_to_line_end(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_to_tag_toggle")]
    pub fn forward_to_tag_toggle<P: IsA<TextTag>>(&mut self, tag: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_to_tag_toggle(
                self.to_glib_none_mut().0,
                tag.map(|p| p.as_ref()).to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_cursor_position")]
    pub fn forward_visible_cursor_position(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_cursor_position(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_cursor_positions")]
    pub fn forward_visible_cursor_positions(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_cursor_positions(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_line")]
    pub fn forward_visible_line(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_line(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_lines")]
    pub fn forward_visible_lines(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_lines(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_word_end")]
    pub fn forward_visible_word_end(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_word_end(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_visible_word_ends")]
    pub fn forward_visible_word_ends(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_visible_word_ends(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_word_end")]
    pub fn forward_word_end(&mut self) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_word_end(
                self.to_glib_none_mut().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_forward_word_ends")]
    pub fn forward_word_ends(&mut self, count: i32) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_forward_word_ends(
                self.to_glib_none_mut().0,
                count,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_buffer")]
    pub fn buffer(&self) -> Option<TextBuffer> {
        unsafe { from_glib_none(ffi::gtk_text_iter_get_buffer(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_get_bytes_in_line")]
    pub fn bytes_in_line(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_bytes_in_line(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_char")]
    pub fn char(&self) -> char {
        unsafe {
            std::convert::TryFrom::try_from(ffi::gtk_text_iter_get_char(self.to_glib_none().0))
                .expect("conversion from an invalid Unicode value attempted")
        }
    }

    #[doc(alias = "gtk_text_iter_get_chars_in_line")]
    pub fn chars_in_line(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_chars_in_line(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_child_anchor")]
    pub fn child_anchor(&self) -> Option<TextChildAnchor> {
        unsafe { from_glib_none(ffi::gtk_text_iter_get_child_anchor(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_get_language")]
    pub fn language(&self) -> pango::Language {
        unsafe { from_glib_full(ffi::gtk_text_iter_get_language(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_get_line")]
    pub fn line(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_line(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_line_index")]
    pub fn line_index(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_line_index(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_line_offset")]
    pub fn line_offset(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_line_offset(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_marks")]
    pub fn marks(&self) -> Vec<TextMark> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gtk_text_iter_get_marks(
                self.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_offset")]
    pub fn offset(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_offset(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_paintable")]
    pub fn paintable(&self) -> Option<gdk::Paintable> {
        unsafe { from_glib_none(ffi::gtk_text_iter_get_paintable(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_get_slice")]
    pub fn get_slice(&self, end: &TextIter) -> Option<glib::GString> {
        unsafe {
            from_glib_full(ffi::gtk_text_iter_get_slice(
                self.to_glib_none().0,
                end.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_tags")]
    pub fn tags(&self) -> Vec<TextTag> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gtk_text_iter_get_tags(
                self.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_text")]
    pub fn get_text(&self, end: &TextIter) -> Option<glib::GString> {
        unsafe {
            from_glib_full(ffi::gtk_text_iter_get_text(
                self.to_glib_none().0,
                end.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_toggled_tags")]
    pub fn get_toggled_tags(&self, toggled_on: bool) -> Vec<TextTag> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gtk_text_iter_get_toggled_tags(
                self.to_glib_none().0,
                toggled_on.to_glib(),
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_visible_line_index")]
    pub fn visible_line_index(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_visible_line_index(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_visible_line_offset")]
    pub fn visible_line_offset(&self) -> i32 {
        unsafe { ffi::gtk_text_iter_get_visible_line_offset(self.to_glib_none().0) }
    }

    #[doc(alias = "gtk_text_iter_get_visible_slice")]
    pub fn get_visible_slice(&self, end: &TextIter) -> Option<glib::GString> {
        unsafe {
            from_glib_full(ffi::gtk_text_iter_get_visible_slice(
                self.to_glib_none().0,
                end.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_get_visible_text")]
    pub fn get_visible_text(&self, end: &TextIter) -> Option<glib::GString> {
        unsafe {
            from_glib_full(ffi::gtk_text_iter_get_visible_text(
                self.to_glib_none().0,
                end.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_has_tag")]
    pub fn has_tag<P: IsA<TextTag>>(&self, tag: &P) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_has_tag(
                self.to_glib_none().0,
                tag.as_ref().to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_in_range")]
    pub fn in_range(&self, start: &TextIter, end: &TextIter) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_in_range(
                self.to_glib_none().0,
                start.to_glib_none().0,
                end.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_inside_sentence")]
    pub fn inside_sentence(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_inside_sentence(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_inside_word")]
    pub fn inside_word(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_inside_word(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_is_cursor_position")]
    pub fn is_cursor_position(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_is_cursor_position(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_is_end")]
    pub fn is_end(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_is_end(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_is_start")]
    pub fn is_start(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_is_start(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_order")]
    pub fn order(&mut self, second: &mut TextIter) {
        unsafe {
            ffi::gtk_text_iter_order(self.to_glib_none_mut().0, second.to_glib_none_mut().0);
        }
    }

    #[doc(alias = "gtk_text_iter_set_line")]
    pub fn set_line(&mut self, line_number: i32) {
        unsafe {
            ffi::gtk_text_iter_set_line(self.to_glib_none_mut().0, line_number);
        }
    }

    #[doc(alias = "gtk_text_iter_set_line_index")]
    pub fn set_line_index(&mut self, byte_on_line: i32) {
        unsafe {
            ffi::gtk_text_iter_set_line_index(self.to_glib_none_mut().0, byte_on_line);
        }
    }

    #[doc(alias = "gtk_text_iter_set_line_offset")]
    pub fn set_line_offset(&mut self, char_on_line: i32) {
        unsafe {
            ffi::gtk_text_iter_set_line_offset(self.to_glib_none_mut().0, char_on_line);
        }
    }

    #[doc(alias = "gtk_text_iter_set_offset")]
    pub fn set_offset(&mut self, char_offset: i32) {
        unsafe {
            ffi::gtk_text_iter_set_offset(self.to_glib_none_mut().0, char_offset);
        }
    }

    #[doc(alias = "gtk_text_iter_set_visible_line_index")]
    pub fn set_visible_line_index(&mut self, byte_on_line: i32) {
        unsafe {
            ffi::gtk_text_iter_set_visible_line_index(self.to_glib_none_mut().0, byte_on_line);
        }
    }

    #[doc(alias = "gtk_text_iter_set_visible_line_offset")]
    pub fn set_visible_line_offset(&mut self, char_on_line: i32) {
        unsafe {
            ffi::gtk_text_iter_set_visible_line_offset(self.to_glib_none_mut().0, char_on_line);
        }
    }

    #[doc(alias = "gtk_text_iter_starts_line")]
    pub fn starts_line(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_starts_line(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_starts_sentence")]
    pub fn starts_sentence(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_starts_sentence(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_starts_tag")]
    pub fn starts_tag<P: IsA<TextTag>>(&self, tag: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_starts_tag(
                self.to_glib_none().0,
                tag.map(|p| p.as_ref()).to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gtk_text_iter_starts_word")]
    pub fn starts_word(&self) -> bool {
        unsafe { from_glib(ffi::gtk_text_iter_starts_word(self.to_glib_none().0)) }
    }

    #[doc(alias = "gtk_text_iter_toggles_tag")]
    pub fn toggles_tag<P: IsA<TextTag>>(&self, tag: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::gtk_text_iter_toggles_tag(
                self.to_glib_none().0,
                tag.map(|p| p.as_ref()).to_glib_none().0,
            ))
        }
    }
}

impl PartialOrd for TextIter {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.compare(other).partial_cmp(&0)
    }
}

impl Ord for TextIter {
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.compare(other).cmp(&0)
    }
}

impl PartialEq for TextIter {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.equal(other)
    }
}

impl Eq for TextIter {}
31.853301
96
0.576451
4851259c5729a06bed61228290dd55e8566012b0
349
//#![windows_subsystem = "windows"] use webview_app::{app::App, app::AppSettings}; fn run_app() { let app = App::new( AppSettings { title: "Rust Web View 👍".to_string(), url: "https://test/crates.io".to_string(), ..Default::default() } ); app.run(); } fn main() { run_app(); }
19.388889
55
0.507163
d6d276fe50d200ca2cd7089f8cb88a483d6f924e
16,364
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::iter::repeat; use cryptoutil::{copy_memory, read_u64v_le, write_u64v_le}; use digest::Digest; use mac::{Mac, MacResult}; use util::secure_memset; static IV : [u64; 8] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, ]; static SIGMA : [[usize; 16]; 12] = [ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], [ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ], [ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ], [ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ], [ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ], [ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ], [ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ], [ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ], [ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 ], [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], ]; const BLAKE2B_BLOCKBYTES : usize = 128; const BLAKE2B_OUTBYTES : usize = 64; const BLAKE2B_KEYBYTES : usize = 64; const BLAKE2B_SALTBYTES : usize = 16; const BLAKE2B_PERSONALBYTES : usize = 16; #[derive(Copy)] pub struct Blake2b { h: [u64; 8], t: [u64; 2], f: [u64; 2], buf: [u8; 2*BLAKE2B_BLOCKBYTES], buflen: usize, key: [u8; BLAKE2B_KEYBYTES], key_length: u8, last_node: u8, digest_length: u8, computed: bool, // whether the final digest has been computed } impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } } struct Blake2bParam { digest_length: u8, key_length: u8, fanout: u8, depth: u8, leaf_length: 
u32, node_offset: u64, node_depth: u8, inner_length: u8, reserved: [u8; 14], salt: [u8; BLAKE2B_SALTBYTES], personal: [u8; BLAKE2B_PERSONALBYTES], } macro_rules! G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({ $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]); $d = ($d ^ $a).rotate_right(32); $c = $c.wrapping_add($d); $b = ($b ^ $c).rotate_right(24); $a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]); $d = ($d ^ $a).rotate_right(16); $c = $c .wrapping_add($d); $b = ($b ^ $c).rotate_right(63); })); macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( { G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m); G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m); G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m); G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m); G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m); G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m); G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m); G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m); } )); impl Blake2b { fn set_lastnode(&mut self) { self.f[1] = 0xFFFFFFFFFFFFFFFF; } fn set_lastblock(&mut self) { if self.last_node!=0 { self.set_lastnode(); } self.f[0] = 0xFFFFFFFFFFFFFFFF; } fn increment_counter(&mut self, inc : u64) { self.t[0] += inc; self.t[1] += if self.t[0] < inc { 1 } else { 0 }; } fn init0(digest_length: u8, key: &[u8]) -> Blake2b { assert!(key.len() <= BLAKE2B_KEYBYTES); let mut b = Blake2b { h: IV, t: [0,0], f: [0,0], buf: [0; 2*BLAKE2B_BLOCKBYTES], buflen: 0, last_node: 0, digest_length: digest_length, computed: false, key: [0; BLAKE2B_KEYBYTES], key_length: key.len() as u8 }; copy_memory(key, &mut b.key); b } fn apply_param(&mut self, p: &Blake2bParam) { use std::io::Write; use cryptoutil::WriteExt; let mut param_bytes : [u8; 64] = [0; 64]; { let mut writer: &mut [u8] = &mut param_bytes; writer.write_u8(p.digest_length).unwrap(); writer.write_u8(p.key_length).unwrap(); writer.write_u8(p.fanout).unwrap(); writer.write_u8(p.depth).unwrap(); writer.write_u32_le(p.leaf_length).unwrap(); 
writer.write_u64_le(p.node_offset).unwrap(); writer.write_u8(p.node_depth).unwrap(); writer.write_u8(p.inner_length).unwrap(); writer.write_all(&p.reserved).unwrap(); writer.write_all(&p.salt).unwrap(); writer.write_all(&p.personal).unwrap(); } let mut param_words : [u64; 8] = [0; 8]; read_u64v_le(&mut param_words, &param_bytes); for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) { *h = *h ^ *param_word; } } // init xors IV with input parameter block fn init_param( p: &Blake2bParam, key: &[u8] ) -> Blake2b { let mut b = Blake2b::init0(p.digest_length, key); b.apply_param(p); b } fn default_param(outlen: u8) -> Blake2bParam { Blake2bParam { digest_length: outlen, key_length: 0, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], } } pub fn new(outlen: usize) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); Blake2b::init_param(&Blake2b::default_param(outlen as u8), &[]) } fn apply_key(&mut self) { let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES]; copy_memory(&self.key[..self.key_length as usize], &mut block); self.update(&block); secure_memset(&mut block[..], 0); } pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b { assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES); assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES); let param = Blake2bParam { digest_length: outlen as u8, key_length: key.len() as u8, fanout: 1, depth: 1, leaf_length: 0, node_offset: 0, node_depth: 0, inner_length: 0, reserved: [0; 14], salt: [0; BLAKE2B_SALTBYTES], personal: [0; BLAKE2B_PERSONALBYTES], }; let mut b = Blake2b::init_param(&param, key); b.apply_key(); b } fn compress(&mut self) { let mut ms: [u64; 16] = [0; 16]; let mut vs: [u64; 16] = [0; 16]; read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]); for (v, h) in vs.iter_mut().zip(self.h.iter()) { *v = *h; } vs[ 8] = IV[0]; vs[ 9] = IV[1]; vs[10] = IV[2]; 
vs[11] = IV[3]; vs[12] = self.t[0] ^ IV[4]; vs[13] = self.t[1] ^ IV[5]; vs[14] = self.f[0] ^ IV[6]; vs[15] = self.f[1] ^ IV[7]; round!( 0, vs, ms ); round!( 1, vs, ms ); round!( 2, vs, ms ); round!( 3, vs, ms ); round!( 4, vs, ms ); round!( 5, vs, ms ); round!( 6, vs, ms ); round!( 7, vs, ms ); round!( 8, vs, ms ); round!( 9, vs, ms ); round!( 10, vs, ms ); round!( 11, vs, ms ); for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) { *h_elem = *h_elem ^ *v_low ^ *v_high; } } fn update( &mut self, mut input: &[u8] ) { while input.len() > 0 { let left = self.buflen; let fill = 2 * BLAKE2B_BLOCKBYTES - left; if input.len() > fill { copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer self.buflen += fill; self.increment_counter( BLAKE2B_BLOCKBYTES as u64); self.compress(); let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); self.buflen -= BLAKE2B_BLOCKBYTES; input = &input[fill..input.len()]; } else { // inlen <= fill copy_memory(input, &mut self.buf[left..]); self.buflen += input.len(); break; } } } fn finalize( &mut self, out: &mut [u8] ) { assert!(out.len() == self.digest_length as usize); if !self.computed { if self.buflen > BLAKE2B_BLOCKBYTES { self.increment_counter(BLAKE2B_BLOCKBYTES as u64); self.compress(); self.buflen -= BLAKE2B_BLOCKBYTES; let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES); let first_half = halves.next().unwrap(); let second_half = halves.next().unwrap(); copy_memory(second_half, first_half); } let incby = self.buflen as u64; self.increment_counter(incby); self.set_lastblock(); let mut temp_buf = self.buf; let buf_slice = &mut temp_buf[self.buflen..]; for b in buf_slice.iter_mut() { *b = 0; } self.compress(); write_u64v_le(&mut self.buf[0..64], &self.h); self.computed = true; } let outlen = out.len(); copy_memory(&self.buf[0..outlen], out); } pub fn 
blake2b(out: &mut[u8], input: &[u8], key: &[u8]) { let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) }; hasher.update(input); hasher.finalize(out); } } impl Digest for Blake2b { fn reset(&mut self) { for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) { *h_elem = *iv_elem; } for t_elem in self.t.iter_mut() { *t_elem = 0; } for f_elem in self.f.iter_mut() { *f_elem = 0; } for b in self.buf.iter_mut() { *b = 0; } self.buflen = 0; self.last_node = 0; self.computed = false; let len = self.digest_length; self.apply_param(&Blake2b::default_param(len)); } fn input(&mut self, msg: &[u8]) { self.update(msg); } fn result(&mut self, out: &mut [u8]) { self.finalize(out); } fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) } fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES } } impl Mac for Blake2b { /** * Process input data. * * # Arguments * * data - The input data to process. * */ fn input(&mut self, data: &[u8]) { self.update(data); } /** * Reset the Mac state to begin processing another input stream. */ fn reset(&mut self) { for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) { *h_elem = *iv_elem; } for t_elem in self.t.iter_mut() { *t_elem = 0; } for f_elem in self.f.iter_mut() { *f_elem = 0; } for b in self.buf.iter_mut() { *b = 0; } self.buflen = 0; self.last_node = 0; self.computed = false; let len = self.digest_length; self.apply_param(&Blake2b::default_param(len)); self.apply_key(); } /** * Obtain the result of a Mac computation as a MacResult. */ fn result(&mut self) -> MacResult { let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect(); self.raw_result(&mut mac); MacResult::new_from_owned(mac) } /** * Obtain the result of a Mac computation as [u8]. This method should be used very carefully * since incorrect use of the Mac code could result in permitting a timing attack which defeats * the security provided by a Mac function. 
*/ fn raw_result(&mut self, output: &mut [u8]) { self.finalize(output); } /** * Get the size of the Mac code, in bytes. */ fn output_bytes(&self) -> usize { self.digest_length as usize } } #[cfg(test)] mod digest_tests { //use cryptoutil::test::test_digest_1million_random; use blake2b::Blake2b; use digest::Digest; struct Test { input: &'static str, output_str: &'static str, } fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) { // Test that it works when accepting the message all at once for t in tests.iter() { sh.input_str(t.input); let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } // Test that it works when accepting the message in pieces for t in tests.iter() { let len = t.input.len(); let mut left = len; while left > 0 { let take = (left + 1) / 2; sh.input_str(&t.input[len - left..take + len - left]); left = left - take; } let out_str = sh.result_str(); assert!(&out_str[..] == t.output_str); sh.reset(); } } #[test] fn test_blake2b_digest() { // Examples from wikipedia let wikipedia_tests = vec![ Test { input: "", output_str: "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419\ d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce" }, Test { input: "The quick brown fox jumps over the lazy dog", output_str: "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673\ f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918" }, ]; let tests = wikipedia_tests; let mut sh = Blake2b::new(64); test_hash(&mut sh, &tests[..]); } } #[cfg(test)] mod mac_tests { use blake2b::Blake2b; use mac::Mac; #[test] fn test_blake2b_mac() { let key: Vec<u8> = (0..64).map(|i| i).collect(); let mut m = Blake2b::new_keyed(64, &key[..]); m.input(&[1,2,4,8]); let expected = [ 0x8e, 0xc6, 0xcb, 0x71, 0xc4, 0x5c, 0x3c, 0x90, 0x91, 0xd0, 0x8a, 0x37, 0x1e, 0xa8, 0x5d, 0xc1, 0x22, 0xb5, 0xc8, 0xe2, 0xd9, 0xe5, 0x71, 0x42, 0xbf, 0xef, 0xce, 0x42, 0xd7, 0xbc, 0xf8, 0x8b, 0xb0, 0x31, 0x27, 0x88, 0x2e, 0x51, 0xa9, 0x21, 0x44, 
0x62, 0x08, 0xf6, 0xa3, 0x58, 0xa9, 0xe0, 0x7d, 0x35, 0x3b, 0xd3, 0x1c, 0x41, 0x70, 0x15, 0x62, 0xac, 0xd5, 0x39, 0x4e, 0xee, 0x73, 0xae, ]; assert_eq!(m.result().code().to_vec(), expected.to_vec()); } } #[cfg(all(test, feature = "with-bench"))] mod bench { use test::Bencher; use digest::Digest; use blake2b::Blake2b; #[bench] pub fn blake2b_10(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 10]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_1k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 1024]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } #[bench] pub fn blake2b_64k(bh: & mut Bencher) { let mut sh = Blake2b::new(64); let bytes = [1u8; 65536]; bh.iter( || { sh.input(&bytes); }); bh.bytes = bytes.len() as u64; } }
30.875472
124
0.509472
f46c8cf9aa952fdacea511d27cddc34cee259c97
3,973
#[macro_use] pub mod point_traits; #[cfg(feature = "cgmath")] mod cgmath_conversions; #[cfg(feature = "glam")] mod glam_conversions; #[cfg(feature = "mint")] mod mint_conversions; #[cfg(feature = "nalgebra")] mod nalgebra_conversions; #[cfg(feature = "sdfu")] mod sdfu_integration; #[cfg(feature = "vox-format")] mod vox_format_conversions; mod point2; mod point3; pub use point2::*; pub use point3::*; use point_traits::*; use bytemuck::{Pod, Zeroable}; use core::ops::{Add, AddAssign, Neg, Sub, SubAssign}; use num::{Signed, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// An N-dimensional point (where N=2 or N=3), which is usually just a primitive array like /// `[i32; 2]` or `[i32; 3]`. It is most convenient to construct points of any dimension as: /// /// ``` /// use building_blocks_core::PointN; /// /// let p2 = PointN([1, 2]); // 2D /// let p3 = PointN([1, 2, 3]); // 3D /// ``` /// /// Points support basic linear algebraic operations such as addition, subtraction, scalar /// multiplication, and scalar division. /// /// ``` /// # use building_blocks_core::prelude::*; /// # /// let p1 = PointN([1, 2]); /// let p2 = PointN([3, 4]); /// /// assert_eq!(p1 + p2, PointN([4, 6])); /// assert_eq!(p1 - p2, PointN([-2, -2])); /// /// assert_eq!(p1 * 2, PointN([2, 4])); /// assert_eq!(p1 / 2, PointN([0, 1])); /// /// // Also some component-wise operations. /// assert_eq!(p1 * p2, PointN([3, 8])); /// assert_eq!(p1 / p2, PointN([0, 0])); /// assert_eq!(p2 / p1, PointN([3, 2])); /// ``` /// /// There is also a partial order defined on points which says that a point A is greater than a /// point B if and only if all of the components of point A are greater than point B. 
This is useful /// for easily checking is a point is inside of the extent between two other points: /// /// ``` /// # use building_blocks_core::PointN; /// # /// let min = PointN([0, 0, 0]); /// let least_upper_bound = PointN([3, 3, 3]); /// /// let p = PointN([0, 1, 2]); /// assert!(min <= p && p < least_upper_bound); /// ``` #[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct PointN<N>(pub N); unsafe impl<N> Zeroable for PointN<N> where N: Zeroable {} unsafe impl<N> Pod for PointN<N> where N: Pod {} impl<N> PointN<N> where Self: MapComponents, { #[inline] pub fn signum(self) -> Self where <Self as MapComponents>::Scalar: Signed, { self.map_components_unary(|c| c.signum()) } } impl<N> Abs for PointN<N> where Self: MapComponents, <Self as MapComponents>::Scalar: Signed, { #[inline] fn abs(self) -> Self { self.map_components_unary(|c| c.abs()) } } impl<N> Neg for PointN<N> where Self: Copy + Sub<Output = Self> + Zero, { type Output = Self; #[inline] fn neg(self) -> Self::Output { Self::zero() - self } } impl<N, T> Add for PointN<N> where Self: MapComponents<Scalar = T>, T: Add<Output = T>, { type Output = Self; #[inline] fn add(self, rhs: Self) -> Self::Output { self.map_components_binary(rhs, |c1, c2| c1 + c2) } } impl<N, T> Sub for PointN<N> where Self: MapComponents<Scalar = T>, T: Sub<Output = T>, { type Output = Self; #[inline] fn sub(self, rhs: Self) -> Self::Output { self.map_components_binary(rhs, |c1, c2| c1 - c2) } } impl<N> AddAssign for PointN<N> where Self: Copy + Add<Output = Self>, { #[inline] fn add_assign(&mut self, rhs: Self) { *self = *self + rhs; } } impl<N> SubAssign for PointN<N> where Self: Copy + Sub<Output = Self>, { #[inline] fn sub_assign(&mut self, rhs: Self) { *self = *self - rhs; } } impl<N> Zero for PointN<N> where Self: Point + ConstZero, { #[inline] fn zero() -> Self { Self::ZERO } #[inline] fn is_zero(&self) -> bool { *self == Self::zero() } }
22.195531
100
0.597282
7978b0ec03eef724bbdd52a70586d5bf1afd8b50
1,696
use std::sync::RwLock; use std::ops::{Deref, DerefMut}; use std::fmt; use serde_json::Value; use super::{RcData, RcItem, DataType}; #[derive(Clone)] pub struct RcChild(*mut RwLock<Child>); impl RcChild { pub fn new(data: RcData, item: RcItem) -> RcChild { RcChild(Box::into_raw(Box::new(RwLock::new(Child::new(data, item))))) } pub fn destroy(self) { unsafe { Box::from_raw(self.0); } } } unsafe impl Send for RcChild {} unsafe impl Sync for RcChild {} impl fmt::Debug for RcChild { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.read().unwrap().get_item_value()) } } impl Deref for RcChild { type Target = RwLock<Child>; fn deref(&self) -> &Self::Target { unsafe { &*self.0 } } } impl DerefMut for RcChild { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { &mut *self.0 } } } #[derive(Clone)] pub struct Child { pub data: RcData, pub item: RcItem, } impl Child { pub fn new(data: RcData, item: RcItem) -> Child { Child { data: data, item: item, } } pub fn get_item_pointer(&self) -> *const Value { self.item.read().unwrap().get_pointer() } pub fn get_item_value(&self) -> &Value { unsafe { &*self.get_item_pointer() } } pub fn get_value(&self) -> Value { self.data.read().unwrap().get_value().clone() } pub fn get_type(&self) -> DataType { self.data.read().unwrap()._type.clone() } } impl fmt::Debug for Child { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.data) } }
20.433735
77
0.561321
f40da6ddac80232eb71c0b6f0a36ba034b4e0df2
3,986
// from_str.rs // This is similar to from_into.rs, but this time we'll implement `FromStr` // and return errors instead of falling back to a default value. // Additionally, upon implementing FromStr, you can use the `parse` method // on strings to generate an object of the implementor type. // You can read more about it at https://doc.rust-lang.org/std/str/trait.FromStr.html use std::num::ParseIntError; use std::str::FromStr; #[derive(Debug, PartialEq)] struct Person { name: String, age: usize, } // We will use this error type for the `FromStr` implementation. #[derive(Debug, PartialEq)] enum ParsePersonError { // Empty input string Empty, // Incorrect number of fields BadLen, // Empty name field NoName, // Wrapped error from parse::<usize>() ParseInt(ParseIntError), } // Steps: // 1. If the length of the provided string is 0, an error should be returned // 2. Split the given string on the commas present in it // 3. Only 2 elements should be returned from the split, otherwise return an error // 4. Extract the first element from the split operation and use it as the name // 5. Extract the other element from the split operation and parse it into a `usize` as the age // with something like `"4".parse::<usize>()` // 6. 
If while extracting the name and the age something goes wrong, an error should be returned // If everything goes well, then return a Result of a Person object impl FromStr for Person { type Err = ParsePersonError; fn from_str(s: &str) -> Result<Person, Self::Err> { if s.is_empty() { return Err(ParsePersonError::Empty) } else { let mut i = s.split(',').collect::<Vec<&str>>(); if i.len() != 2 { Err(ParsePersonError::BadLen) } else { let name = i[0]; let age = i[1].parse::<usize>().or_else(|e| Err(ParsePersonError::ParseInt(e)))?; if name.is_empty() { Err(ParsePersonError::NoName) } else { Ok(Person { age, name: String::from(name) }) } } } } } fn main() { let p = "Mark,20".parse::<Person>().unwrap(); println!("{:?}", p); } #[cfg(test)] mod tests { use super::*; #[test] fn empty_input() { assert_eq!("".parse::<Person>(), Err(ParsePersonError::Empty)); } #[test] fn good_input() { let p = "John,32".parse::<Person>(); assert!(p.is_ok()); let p = p.unwrap(); assert_eq!(p.name, "John"); assert_eq!(p.age, 32); } #[test] fn missing_age() { assert!(matches!( "John,".parse::<Person>(), Err(ParsePersonError::ParseInt(_)) )); } #[test] fn invalid_age() { assert!(matches!( "John,twenty".parse::<Person>(), Err(ParsePersonError::ParseInt(_)) )); } #[test] fn missing_comma_and_age() { assert_eq!("John".parse::<Person>(), Err(ParsePersonError::BadLen)); } #[test] fn missing_name() { assert_eq!(",1".parse::<Person>(), Err(ParsePersonError::NoName)); } #[test] fn missing_name_and_age() { assert!(matches!( ",".parse::<Person>(), Err(ParsePersonError::NoName | ParsePersonError::ParseInt(_)) )); } #[test] fn missing_name_and_invalid_age() { assert!(matches!( ",one".parse::<Person>(), Err(ParsePersonError::NoName | ParsePersonError::ParseInt(_)) )); } #[test] fn trailing_comma() { assert_eq!("John,32,".parse::<Person>(), Err(ParsePersonError::BadLen)); } #[test] fn trailing_comma_and_some_string() { assert_eq!( "John,32,man".parse::<Person>(), Err(ParsePersonError::BadLen) ); } }
28.269504
97
0.560963
3924b166f451db88c3b5f5d0aabd328ebbca3dc6
6,474
use std::rc::Rc; use crate::eval::*; use crate::eval_file; use crate::parser::parse_expression; use crate::scope::*; fn assert_eval(expr: &str, expected: &str) { assert_eval_with_scope(&get_global_scope(), expr, expected); } fn assert_eval_with_scope(scope: &Rc<Scope>, expr: &str, expected: &str) { let obj = parse_expression(expr).unwrap().pop().unwrap(); eval(&Rc::new(obj), &scope) .map(|obj| assert_eq!(format!("{}", obj), expected)) .unwrap_or_else(|err| panic!(err)); } fn expect_err(expr: &str) { let scope = Scope::new(&[], Some(&get_global_scope())); let obj = parse_expression(expr).unwrap().pop().unwrap(); eval(&Rc::new(obj), &scope).expect_err(format!("error expected for {}", expr).as_str()); } #[test] #[rustfmt::skip] fn eval_test() { // core functions and special forms assert_eval("(begin 1 2 3)", "3"); assert_eval("'1", "1"); assert_eval("'''foo", "(quote (quote foo))"); expect_err("(list a)"); // unbound variable expect_err("(begin 1 . 2)"); // not a proper list expect_err("(quote 1 2)"); // malformed quote // list and pairs functions assert_eval("(car '(1 . 2))", "1"); assert_eval("(cdr '(1 . 2))", "2"); assert_eval("(car (cdr '(1 2 3)))", "2"); assert_eval("(cadr '(1 2 3))", "2"); assert_eval("(cadar '((1 2) 3))", "2"); assert_eval("(cddr '(1 2 3))", "(3)"); assert_eval("(caaaar '((((1 2 3))) 4))", "1"); assert_eval("(length '(1 2 3))", "3"); assert_eval("(cons 1 2)", "(1 . 2)"); assert_eval("(cons 1 '(2 3))", "(1 2 3)"); assert_eval("(list 1 2 3)", "(1 2 3)"); // let, define assert_eval("(let ((x 2)) x)", "2"); assert_eval("(let ((x car) (y '(1 2 3))) (x y))", "1"); assert_eval("(let ((x 1)) (let ((x 2) (y x)) y))", "1"); assert_eval("(begin (define x 5) (cons (begin (define x 2) x) x))", "(2 . 5)"); assert_eval("(begin (define (x)) x)", "<function>"); assert_eval("(begin (define (x a) (car a)) (x '(5 6)))", "5"); assert_eval("(begin (define (tail a . 
b) b) (tail 1 2 3))", "(2 3)"); // lambda functions assert_eval("((lambda x x) 1 2 3)", "(1 2 3)"); assert_eval("((lambda (x . y) y) 1 2 3)", "(2 3)"); assert_eval("(begin (define f (lambda (x) x)) (f 'foo))", "foo"); expect_err("(lambda (1) a)"); // wrong argument type expect_err("(lambda (x x) x)"); // duplication of argument id expect_err("((lambda (a b) a) 1)"); // too few arguments given expect_err("((lambda (a b) a) 1 2 3)"); // too many arguments given // logic functions assert_eval("(if #t 1 2)", "1"); assert_eval("(if #f 1 2)", "2"); assert_eval("(or 5 foo #f)", "5"); expect_err("(or #f foo)"); // unbound variable assert_eval("(and 5 'foo 42)", "42"); assert_eval("(list (boolean? #f) (boolean? 5))", "(#t #f)"); assert_eval("(list (null? #t) (null? '(5)) (null? '()) (null? (cdr '(5))))", "(#f #f #t #t)"); assert_eval("(list (pair? '(1 2)) (pair? 5))", "(#t #f)"); assert_eval("(list (list? '(1 2)) (list? 5) (list? '(1 . 2)))", "(#t #f #f)"); assert_eval("(list (not #f) (not 5))", "(#t #f)"); assert_eval("(cond (#f 42) ('foo))", "foo"); assert_eval("(cond (#f 42) (5 'foo))", "foo"); assert_eval("(cond (#f 42))", "#<undef>"); assert_eval("(cond (#f 42) (else))", "#<undef>"); assert_eval("(cond (#f 42) (else 1 2))", "2"); assert_eval("(cond (#f 42) (#t 1 2))", "2"); // arithmetic functions assert_eval("(list (number? 1) (number? 'foo))", "(#t #f)"); assert_eval("(list (integer? 1) (integer? 1.0))", "(#t #f)"); assert_eval("(list (real? 1) (real? 1.0))", "(#t #t)"); assert_eval("(list (= 1 1 2) (= 1 1.0))", "(#f #t)"); assert_eval("(list (< 1 2.0 3) (< 1 3 2))", "(#t #f)"); assert_eval("(list (> 5 3 0) (> 5 1.0 3))", "(#t #f)"); assert_eval("(list (+) (+ 1) (+ 1 2 3) (+ 1 2 3.5))", "(0 1 6 6.5)"); assert_eval("(list (-) (- 1) (- 1 2.5) (- 1 2 3))", "(0 -1 -1.5 -4)"); assert_eval("(list (*) (* 2) (* 1 2 3.5))", "(1 2 7)"); assert_eval("(list (/) (/ 2) (/ 2 1))", "(1 0.5 2)"); assert_eval("(list (integer? (/ 2 1)) (integer? 
(/ 1 2)))", "(#t #f)"); expect_err("(= 1 foo)"); // wrong arguments expect_err("(+ 1 'foo)"); // wrong type expect_err("(/1 2 0)"); // division by 0 assert_eval("(apply list '(1 2 3))", "(1 2 3)"); assert_eval("(apply list 1 2 '(3 4))", "(1 2 3 4)"); assert_eval("(let ((foo (lambda (x) (+ x 10)))) (apply foo '(0)))", "10"); expect_err("(apply + 1 2 3)"); // improper list expect_err("(apply +)"); assert_eval("(map list '(1 2 3))", "((1) (2) (3))"); assert_eval("(map list '(1 2 3) '(4 5 6))", "((1 4) (2 5) (3 6))"); expect_err("(map + '(1 2) '(4 5 6))"); // lists length don't match expect_err("(map +)"); // equalities assert_eval("(list (eqv? '() '()) (eqv? '(a) '(a)) (eqv? '(()) '(())))", "(#t #f #f)"); assert_eval("(list (eqv? #t #t) (eqv? #t #f) (eqv? #t 42))", "(#t #f #f)"); assert_eval("(list (eqv? 'a 'a) (eqv? 'a 'b))", "(#t #f)"); assert_eval("(eqv? (lambda () 1) (lambda () 1))", "#f"); assert_eval("(let ((p (lambda (x) x))) (eqv? p p))", "#t"); assert_eval("(let ((a '(a)) (b '(a))) (list (eqv? a a) (eqv? a b)))", "(#t #f)"); assert_eval("(let ((a '(a b))) (eqv? (cdr a) (cdr a)))", "#t"); assert_eval("(let ((a '(a b))) (eqv? (cdr a) '(b)))", "#f"); assert_eval("(eqv? car car)", "#t"); assert_eval("(eqv? cdadar cdadar)", "#t"); assert_eval("(list (eqv? 2 2) (eqv? 2 3) (eqv? 2 2.0))", "(#t #f #t)"); assert_eval("(list (eq? 2 2) (eq? 2 3) (eq? 2 2.0))", "(#t #f #f)"); assert_eval("(equal? '(a b (c)) '(a b (c)))", "#t"); assert_eval("(equal? '(a b (c)) '(a b c))", "#f"); assert_eval("(equal? '(a b (c)) '(a b))", "#f"); assert_eval("(equal? '(2) '(2.0))", "#t"); // test functions from the prelude let scope = &get_global_scope(); assert!(eval_file("prelude.scm", scope).is_ok()); assert_eval_with_scope(scope, "(foldr cons '() '(1 2 3))", "(1 2 3)"); assert_eval_with_scope(scope, "(foldl cons '() '(1 2 3))", "(((() . 1) . 2) . 
3)"); assert_eval_with_scope(scope, "(append '(1 2) '(3 4))", "(1 2 3 4)"); assert_eval_with_scope(scope, "(reverse '(1 2 3 4))", "(4 3 2 1)"); }
45.914894
98
0.491813
62f7bb07660f93d89fac29409cc9d56a8b612d5f
3,228
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::fmt; use common_datavalues::prelude::*; use common_exception::ErrorCode; use common_exception::Result; use crate::scalars::cast_column_field; use crate::scalars::Function; use crate::scalars::FunctionDescription; use crate::scalars::FunctionFeatures; #[derive(Clone)] pub struct UnhexFunction { _display_name: String, } impl UnhexFunction { pub fn try_create(display_name: &str) -> Result<Box<dyn Function>> { Ok(Box::new(UnhexFunction { _display_name: display_name.to_string(), })) } pub fn desc() -> FunctionDescription { FunctionDescription::creator(Box::new(Self::try_create)) .features(FunctionFeatures::default().deterministic().num_arguments(1)) } } impl Function for UnhexFunction { fn name(&self) -> &str { "unhex" } fn return_type(&self, args: &[&DataTypePtr]) -> Result<DataTypePtr> { if !args[0].data_type_id().is_string() && !args[0].data_type_id().is_null() { return Err(ErrorCode::IllegalDataType(format!( "Expected string or null, but got {}", args[0].data_type_id() ))); } Ok(StringType::arc()) } fn eval(&self, columns: &ColumnsWithField, input_rows: usize) -> Result<ColumnRef> { const BUFFER_SIZE: usize = 32; let col = cast_column_field(&columns[0], &StringType::arc())?; let col = col.as_any().downcast_ref::<StringColumn>().unwrap(); let mut builder: ColumnBuilder<Vu8> = ColumnBuilder::with_capacity(input_rows); for val in col.iter() { if val.len() <= BUFFER_SIZE * 2 { let size 
= val.len() / 2; let mut buffer = vec![0u8; size]; let buffer = &mut buffer[0..size]; match hex::decode_to_slice(val, buffer) { Ok(()) => builder.append(buffer), Err(err) => { return Err(ErrorCode::UnexpectedError(format!( "{} can not unhex because: {}", String::from_utf8_lossy(val), err ))) } } } else { return Err(ErrorCode::UnexpectedError(format!( "{} is too long than buffer size", String::from_utf8_lossy(val) ))); } } Ok(builder.build(input_rows)) } } impl fmt::Display for UnhexFunction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UNHEX") } }
31.960396
88
0.578067
29a541c24468659d0071a116e6f2248892724e43
4,745
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn cmovb_1() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(SI)), operand2: Some(Direct(CX)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 66, 241], OperandSize::Word) } fn cmovb_2() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(BX)), operand2: Some(IndirectScaledIndexedDisplaced(BP, SI, One, 68, Some(OperandSize::Word), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 66, 90, 68], OperandSize::Word) } fn cmovb_3() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(BP)), operand2: Some(Direct(SP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 236], OperandSize::Dword) } fn cmovb_4() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(DI)), operand2: Some(IndirectDisplaced(EAX, 1240678133, Some(OperandSize::Word), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 184, 245, 62, 243, 73], OperandSize::Dword) } fn cmovb_5() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(BP)), operand2: Some(Direct(DI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 239], OperandSize::Qword) } fn cmovb_6() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(SP)), operand2: Some(Indirect(RDX, Some(OperandSize::Word), None)), operand3: None, operand4: None, lock: false, 
rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 34], OperandSize::Qword) } fn cmovb_7() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(EDX)), operand2: Some(Direct(ESP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 212], OperandSize::Word) } fn cmovb_8() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(ECX)), operand2: Some(IndirectScaledIndexed(BX, DI, One, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 66, 9], OperandSize::Word) } fn cmovb_9() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(EDI)), operand2: Some(Direct(ESI)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 66, 254], OperandSize::Dword) } fn cmovb_10() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(ESP)), operand2: Some(IndirectScaledDisplaced(EAX, Four, 168887598, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 66, 36, 133, 46, 5, 17, 10], OperandSize::Dword) } fn cmovb_11() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(ESI)), operand2: Some(Direct(EBP)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 66, 245], OperandSize::Qword) } fn cmovb_12() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(EDI)), operand2: Some(IndirectScaledIndexed(RCX, RDX, Two, Some(OperandSize::Dword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, 
mask: None, broadcast: None }, &[15, 66, 60, 81], OperandSize::Qword) } fn cmovb_13() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(RDI)), operand2: Some(Direct(RCX)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[72, 15, 66, 249], OperandSize::Qword) } fn cmovb_14() { run_test(&Instruction { mnemonic: Mnemonic::CMOVB, operand1: Some(Direct(RBP)), operand2: Some(Indirect(RDX, Some(OperandSize::Qword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[72, 15, 66, 42], OperandSize::Qword) }
74.140625
360
0.706217
26233bf2c696afb8ae6acea0061064ec824999fe
4,631
use super::{HandlerError, HandlerResult}; use crate::prelude::*; /// Handles the `depends on` & `-depends on` commands pub async fn handle( world: &World, ctxt: &int::MergeRequestCommandContext, action: int::CommandAction, dependency: int::MergeRequestPtr, ) -> HandlerResult<()> { let (gl_user, user_id) = sync_user(world, ctxt.user).await?; let (gl_project, gl_merge_request, merge_request_id) = sync_merge_request_ptr(world, &ctxt.merge_request, &Default::default()).await?; let src_context = int::PtrContext { namespace_id: Some(gl_project.namespace.id), project_id: Some(gl_project.id), }; let (gl_dst_project_id, gl_dst_merge_request_iid) = dependency .resolve(&world.gitlab, &src_context) .await .map_err(|_| HandlerError::MergeRequestNotFound)?; Handler { world, ctxt, user_id, merge_request_id, gl_dst_project_id, gl_dst_merge_request_iid, } .run(action) .await?; // TODO maybe we could thumbs-up the post instead of sending a comment? world .gitlab .create_merge_request_note( gl_project.id, gl_merge_request.iid, &ctxt.discussion, format!("@{} :+1:", gl_user.username), ) .await?; Ok(()) } struct Handler<'a> { world: &'a World, ctxt: &'a int::MergeRequestCommandContext, user_id: db::Id<db::User>, merge_request_id: db::Id<db::MergeRequest>, gl_dst_project_id: gl::ProjectId, gl_dst_merge_request_iid: gl::MergeRequestIid, } impl<'a> Handler<'a> { async fn run(self, action: int::CommandAction) -> HandlerResult<()> { // Since It's totally fine for a merge request pointer to be both resolved _and_ // invalid - e.g. 
when user writes `project!123` (assuming the project itself // exists) - we have to explicitly check whether the merge request user is // talking about exists or not if self .world .gitlab .merge_request(self.gl_dst_project_id, self.gl_dst_merge_request_iid) .await .is_err() { return Err(HandlerError::MergeRequestNotFound); } let dependency = self .world .db .get_opt(db::FindMergeRequestDependencies { user_id: Some(self.user_id), ext_discussion_id: Some(&self.ctxt.discussion), src_merge_request_id: Some(self.merge_request_id), ..Default::default() }) .await?; let (_, _, dst_merge_request_id) = sync_merge_request( self.world, self.gl_dst_project_id, self.gl_dst_merge_request_iid, ) .await?; if action.is_add() { self.run_add(dependency, dst_merge_request_id).await } else { self.run_remove(dependency).await } } /// Handles the `depends on` command async fn run_add( &self, dependency: Option<db::MergeRequestDependency>, dst_merge_request_id: db::Id<db::MergeRequest>, ) -> HandlerResult<()> { // It might happen that we already know about this dependency - say, when // someone adds the same `depends on !123` comment twice. // // In order to make the UI less confusing, when that happens, we're just // silently ignoring the second request. if dependency.is_none() { self.world .db .execute(db::CreateMergeRequestDependency { user_id: self.user_id, ext_discussion_id: self.ctxt.discussion.clone(), src_merge_request_id: self.merge_request_id, dst_merge_request_id, }) .await?; } Ok(()) } /// Handles the `-depends on` command async fn run_remove( &self, dependency: Option<db::MergeRequestDependency>, ) -> HandlerResult<()> { // It might happen that we've already removed this dependency - say, when // someone adds the same `-depends on !123` comment twice. // // In order to make the UI less confusing, when that happens, we're just // silently ignoring the second request. 
if let Some(dependency) = dependency { self.world .db .execute(db::DeleteMergeRequestDependency { id: dependency.id }) .await?; } Ok(()) } }
31.290541
88
0.579572
5b58016229d1eb79bdc9982d7cb2e21f7d2c7624
33,921
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! Actor for DHT functionality. //! //! The DhtActor is responsible for sending a join request on startup //! and furnishing [DhtRequest]s. //! //! 
[DhtRequest]: ./enum.DhtRequest.html use crate::{ broadcast_strategy::BroadcastStrategy, discovery::DhtDiscoveryError, outbound::{DhtOutboundError, OutboundMessageRequester, SendMessageParams}, proto::{dht::JoinMessage, envelope::DhtMessageType}, storage::{DbConnection, DhtDatabase, DhtMetadataKey, StorageError}, DhtConfig, }; use chrono::{DateTime, Utc}; use futures::{ channel::{mpsc, mpsc::SendError, oneshot}, future, future::BoxFuture, stream::{Fuse, FuturesUnordered}, SinkExt, StreamExt, }; use log::*; use std::{cmp, fmt, fmt::Display, sync::Arc}; use tari_comms::{ connectivity::{ConnectivityError, ConnectivityRequester, ConnectivitySelection}, peer_manager::{NodeId, NodeIdentity, PeerFeatures, PeerManager, PeerManagerError, PeerQuery, PeerQuerySortBy}, }; use tari_shutdown::ShutdownSignal; use tari_utilities::message_format::{MessageFormat, MessageFormatError}; use thiserror::Error; use tokio::task; use ttl_cache::TtlCache; const LOG_TARGET: &str = "comms::dht::actor"; #[derive(Debug, Error)] pub enum DhtActorError { #[error("MPSC channel is disconnected")] ChannelDisconnected, #[error("MPSC sender was unable to send because the channel buffer is full")] SendBufferFull, #[error("Reply sender canceled the request")] ReplyCanceled, #[error("PeerManagerError: {0}")] PeerManagerError(#[from] PeerManagerError), #[error("Failed to broadcast join message: {0}")] FailedToBroadcastJoinMessage(DhtOutboundError), #[error("DiscoveryError: {0}")] DiscoveryError(#[from] DhtDiscoveryError), #[error("StorageError: {0}")] StorageError(#[from] StorageError), #[error("StoredValueFailedToDeserialize: {0}")] StoredValueFailedToDeserialize(MessageFormatError), #[error("FailedToSerializeValue: {0}")] FailedToSerializeValue(MessageFormatError), #[error("ConnectivityError: {0}")] ConnectivityError(#[from] ConnectivityError), #[error("Connectivity event stream closed")] ConnectivityEventStreamClosed, } impl From<SendError> for DhtActorError { fn from(err: SendError) -> Self { if 
err.is_disconnected() { DhtActorError::ChannelDisconnected } else if err.is_full() { DhtActorError::SendBufferFull } else { unreachable!(); } } } #[derive(Debug)] pub enum DhtRequest { /// Send a Join request to the network SendJoin, /// Inserts a message signature to the msg hash cache. This operation replies with a boolean /// which is true if the signature already exists in the cache, otherwise false MsgHashCacheInsert(Vec<u8>, oneshot::Sender<bool>), /// Fetch selected peers according to the broadcast strategy SelectPeers(BroadcastStrategy, oneshot::Sender<Vec<NodeId>>), GetMetadata(DhtMetadataKey, oneshot::Sender<Result<Option<Vec<u8>>, DhtActorError>>), SetMetadata(DhtMetadataKey, Vec<u8>, oneshot::Sender<Result<(), DhtActorError>>), } impl Display for DhtRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use DhtRequest::*; match self { SendJoin => f.write_str("SendJoin"), MsgHashCacheInsert(_, _) => f.write_str("MsgHashCacheInsert"), SelectPeers(s, _) => f.write_str(&format!("SelectPeers (Strategy={})", s)), GetMetadata(key, _) => f.write_str(&format!("GetMetadata (key={})", key)), SetMetadata(key, value, _) => { f.write_str(&format!("SetMetadata (key={}, value={} bytes)", key, value.len())) }, } } } #[derive(Clone)] pub struct DhtRequester { sender: mpsc::Sender<DhtRequest>, } impl DhtRequester { pub fn new(sender: mpsc::Sender<DhtRequest>) -> Self { Self { sender } } pub async fn send_join(&mut self) -> Result<(), DhtActorError> { self.sender.send(DhtRequest::SendJoin).await.map_err(Into::into) } pub async fn select_peers(&mut self, broadcast_strategy: BroadcastStrategy) -> Result<Vec<NodeId>, DhtActorError> { let (reply_tx, reply_rx) = oneshot::channel(); self.sender .send(DhtRequest::SelectPeers(broadcast_strategy, reply_tx)) .await?; reply_rx.await.map_err(|_| DhtActorError::ReplyCanceled) } pub async fn insert_message_hash(&mut self, signature: Vec<u8>) -> Result<bool, DhtActorError> { let (reply_tx, reply_rx) = oneshot::channel(); 
self.sender .send(DhtRequest::MsgHashCacheInsert(signature, reply_tx)) .await?; reply_rx.await.map_err(|_| DhtActorError::ReplyCanceled) } pub async fn get_metadata<T: MessageFormat>(&mut self, key: DhtMetadataKey) -> Result<Option<T>, DhtActorError> { let (reply_tx, reply_rx) = oneshot::channel(); self.sender.send(DhtRequest::GetMetadata(key, reply_tx)).await?; match reply_rx.await.map_err(|_| DhtActorError::ReplyCanceled)?? { Some(bytes) => T::from_binary(&bytes) .map(Some) .map_err(DhtActorError::StoredValueFailedToDeserialize), None => Ok(None), } } pub async fn set_metadata<T: MessageFormat>(&mut self, key: DhtMetadataKey, value: T) -> Result<(), DhtActorError> { let (reply_tx, reply_rx) = oneshot::channel(); let bytes = value.to_binary().map_err(DhtActorError::FailedToSerializeValue)?; self.sender.send(DhtRequest::SetMetadata(key, bytes, reply_tx)).await?; reply_rx.await.map_err(|_| DhtActorError::ReplyCanceled)? } } pub struct DhtActor { node_identity: Arc<NodeIdentity>, peer_manager: Arc<PeerManager>, database: DhtDatabase, outbound_requester: OutboundMessageRequester, connectivity: ConnectivityRequester, config: DhtConfig, shutdown_signal: Option<ShutdownSignal>, request_rx: Fuse<mpsc::Receiver<DhtRequest>>, msg_hash_cache: TtlCache<Vec<u8>, ()>, } impl DhtActor { #[allow(clippy::too_many_arguments)] pub fn new( config: DhtConfig, conn: DbConnection, node_identity: Arc<NodeIdentity>, peer_manager: Arc<PeerManager>, connectivity: ConnectivityRequester, outbound_requester: OutboundMessageRequester, request_rx: mpsc::Receiver<DhtRequest>, shutdown_signal: ShutdownSignal, ) -> Self { Self { msg_hash_cache: TtlCache::new(config.msg_hash_cache_capacity), config, database: DhtDatabase::new(conn), outbound_requester, peer_manager, connectivity, node_identity, shutdown_signal: Some(shutdown_signal), request_rx: request_rx.fuse(), } } pub fn spawn(self) { task::spawn(async move { if let Err(err) = self.run().await { error!(target: LOG_TARGET, "DhtActor failed to 
start with error: {:?}", err); } }); } async fn run(mut self) -> Result<(), DhtActorError> { let offline_ts = self .database .get_metadata_value::<DateTime<Utc>>(DhtMetadataKey::OfflineTimestamp) .await .ok() .flatten(); info!( target: LOG_TARGET, "DhtActor started. {}", offline_ts .map(|dt| format!("Dht has been offline since '{}'", dt)) .unwrap_or_else(String::new) ); let mut pending_jobs = FuturesUnordered::new(); let mut shutdown_signal = self .shutdown_signal .take() .expect("DhtActor initialized without shutdown_signal"); loop { futures::select! { request = self.request_rx.select_next_some() => { trace!(target: LOG_TARGET, "DhtActor received request: {}", request); pending_jobs.push(self.request_handler(request)); }, result = pending_jobs.select_next_some() => { if let Err(err) = result { debug!(target: LOG_TARGET, "Error when handling DHT request message. {}", err); } }, _ = shutdown_signal => { info!(target: LOG_TARGET, "DhtActor is shutting down because it received a shutdown signal."); self.mark_shutdown_time().await; break Ok(()); }, } } } async fn mark_shutdown_time(&self) { if let Err(err) = self .database .set_metadata_value(DhtMetadataKey::OfflineTimestamp, Utc::now()) .await { warn!(target: LOG_TARGET, "Failed to mark offline time: {:?}", err); } } fn request_handler(&mut self, request: DhtRequest) -> BoxFuture<'static, Result<(), DhtActorError>> { use DhtRequest::*; match request { SendJoin => { let node_identity = Arc::clone(&self.node_identity); let outbound_requester = self.outbound_requester.clone(); Box::pin(Self::broadcast_join(node_identity, outbound_requester)) }, MsgHashCacheInsert(hash, reply_tx) => { // No locks needed here. 
Downside is this isn't really async, however this should be // fine as it is very quick let already_exists = self .msg_hash_cache .insert(hash, (), self.config.msg_hash_cache_ttl) .is_some(); let result = reply_tx.send(already_exists).map_err(|_| DhtActorError::ReplyCanceled); Box::pin(future::ready(result)) }, SelectPeers(broadcast_strategy, reply_tx) => { let peer_manager = Arc::clone(&self.peer_manager); let node_identity = Arc::clone(&self.node_identity); let connectivity = self.connectivity.clone(); let config = self.config.clone(); Box::pin(async move { match Self::select_peers(config, node_identity, peer_manager, connectivity, broadcast_strategy) .await { Ok(peers) => reply_tx.send(peers).map_err(|_| DhtActorError::ReplyCanceled), Err(err) => { warn!(target: LOG_TARGET, "Peer selection failed: {:?}", err); reply_tx.send(Vec::new()).map_err(|_| DhtActorError::ReplyCanceled) }, } }) }, GetMetadata(key, reply_tx) => { let db = self.database.clone(); Box::pin(async move { let _ = reply_tx.send(db.get_metadata_value_bytes(key).await.map_err(Into::into)); Ok(()) }) }, SetMetadata(key, value, reply_tx) => { let db = self.database.clone(); Box::pin(async move { match db.set_metadata_value_bytes(key, value).await { Ok(_) => { debug!(target: LOG_TARGET, "Dht metadata '{}' set", key); let _ = reply_tx.send(Ok(())); }, Err(err) => { warn!(target: LOG_TARGET, "Unable to set metadata because {:?}", err); let _ = reply_tx.send(Err(err.into())); }, } Ok(()) }) }, } } async fn broadcast_join( node_identity: Arc<NodeIdentity>, mut outbound_requester: OutboundMessageRequester, ) -> Result<(), DhtActorError> { let message = JoinMessage::from(&node_identity); debug!(target: LOG_TARGET, "Sending Join message to closest peers"); outbound_requester .send_message_no_header( SendMessageParams::new() .closest(node_identity.node_id().clone(), vec![]) .with_destination(node_identity.node_id().clone().into()) .with_dht_message_type(DhtMessageType::Join) .force_origin() .finish(), 
message, ) .await .map_err(DhtActorError::FailedToBroadcastJoinMessage)?; Ok(()) } async fn select_peers( config: DhtConfig, node_identity: Arc<NodeIdentity>, peer_manager: Arc<PeerManager>, mut connectivity: ConnectivityRequester, broadcast_strategy: BroadcastStrategy, ) -> Result<Vec<NodeId>, DhtActorError> { use BroadcastStrategy::*; match broadcast_strategy { DirectNodeId(node_id) => { // Send to a particular peer matching the given node ID peer_manager .direct_identity_node_id(&node_id) .await .map(|peer| peer.map(|p| vec![p.node_id]).unwrap_or_default()) .map_err(Into::into) }, DirectPublicKey(public_key) => { // Send to a particular peer matching the given node ID peer_manager .direct_identity_public_key(&public_key) .await .map(|peer| peer.map(|p| vec![p.node_id]).unwrap_or_default()) .map_err(Into::into) }, Flood(exclude) => { let peers = connectivity .select_connections(ConnectivitySelection::all_nodes(exclude)) .await?; Ok(peers.into_iter().map(|p| p.peer_node_id().clone()).collect()) }, Closest(closest_request) => { let connections = connectivity .select_connections(ConnectivitySelection::closest_to( closest_request.node_id.clone(), config.broadcast_factor, closest_request.excluded_peers.clone(), )) .await?; let mut candidates = connections .iter() .map(|conn| conn.peer_node_id()) .cloned() .collect::<Vec<_>>(); if !closest_request.connected_only { let excluded = closest_request .excluded_peers .iter() .chain(candidates.iter()) .cloned() .collect::<Vec<_>>(); // If we don't have enough connections, let's select some more disconnected peers (at least 2) let n = cmp::max(config.broadcast_factor.saturating_sub(candidates.len()), 2); let additional = Self::select_closest_peers_for_propagation( &peer_manager, &closest_request.node_id, n, &excluded, PeerFeatures::MESSAGE_PROPAGATION, ) .await?; candidates.extend(additional); } Ok(candidates) }, Random(n, excluded) => { // Send to a random set of peers of size n that are Communication Nodes Ok(peer_manager 
.random_peers(n, &excluded) .await? .into_iter() .map(|p| p.node_id) .collect()) }, Broadcast(exclude) => { let connections = connectivity .select_connections(ConnectivitySelection::random_nodes( config.broadcast_factor, exclude.clone(), )) .await?; let candidates = connections .iter() .map(|c| c.peer_node_id()) .cloned() .collect::<Vec<_>>(); if candidates.is_empty() { warn!( target: LOG_TARGET, "Broadcast requested but there are no node peer connections available" ); } debug!( target: LOG_TARGET, "{} candidate(s) selected for broadcast", candidates.len() ); Ok(candidates) }, Propagate(destination, exclude) => { let dest_node_id = destination .node_id() .cloned() .or_else(|| destination.public_key().map(|pk| NodeId::from_public_key(pk))); let connections = match dest_node_id { Some(node_id) => { let dest_connection = connectivity.get_connection(node_id.clone()).await?; // If the peer was added to the exclude list, we don't want to send directly to the peer. // This ensures that we don't just send a message back to the peer that sent it. let dest_connection = dest_connection.filter(|c| !exclude.contains(c.peer_node_id())); match dest_connection { Some(conn) => { // We're connected to the destination, so send the message directly vec![conn] }, None => { // Select connections closer to the destination let mut connections = connectivity .select_connections(ConnectivitySelection::closest_to( node_id.clone(), config.num_neighbouring_nodes, exclude.clone(), )) .await?; // Exclude candidates that are further away from the destination than this node // unless this node has not selected a big enough sample i.e. 
this node is not well // connected if connections.len() >= config.propagation_factor { let dist_from_dest = node_identity.node_id().distance(&node_id); let before_len = connections.len(); connections = connections .into_iter() .filter(|conn| conn.peer_node_id().distance(&node_id) <= dist_from_dest) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Filtered out {} node(s) that are further away than this node.", before_len - connections.len() ); } connections.truncate(config.propagation_factor); connections }, } }, None => { debug!( target: LOG_TARGET, "No destination for propagation, sending to {} random peers", config.propagation_factor ); connectivity .select_connections(ConnectivitySelection::random_nodes( config.propagation_factor, exclude.clone(), )) .await? }, }; if connections.is_empty() { warn!( target: LOG_TARGET, "Propagation requested but there are no node peer connections available" ); } let candidates = connections .iter() .map(|c| c.peer_node_id()) .cloned() .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "{} candidate(s) selected for propagation to {}", candidates.len(), destination ); trace!( target: LOG_TARGET, "(ThisNode = {}) Candidates are {}", node_identity.node_id().short_str(), candidates.iter().map(|n| n.short_str()).collect::<Vec<_>>().join(", ") ); Ok(candidates) }, } } /// Selects at least `n` MESSAGE_PROPAGATION peers (assuming that many are known) that are closest to `node_id` as /// well as other peers which do not advertise the MESSAGE_PROPAGATION flag (unless excluded by some other means /// e.g. `excluded` list, filter_predicate etc. The filter_predicate is called on each peer excluding them from /// the final results if that returns false. /// /// This ensures that peers are selected which are able to propagate the message further while still allowing /// clients to propagate to non-propagation nodes if required (e.g. 
Discovery messages) async fn select_closest_peers_for_propagation( peer_manager: &PeerManager, node_id: &NodeId, n: usize, excluded_peers: &[NodeId], features: PeerFeatures, ) -> Result<Vec<NodeId>, DhtActorError> { // Fetch to all n nearest neighbour Communication Nodes // which are eligible for connection. // Currently that means: // - The peer isn't banned, // - it has the required features // - it didn't recently fail to connect, and // - it is not in the exclusion list in closest_request let mut connect_ineligable_count = 0; let mut banned_count = 0; let mut excluded_count = 0; let mut filtered_out_node_count = 0; let query = PeerQuery::new() .select_where(|peer| { if peer.is_banned() { banned_count += 1; return false; } if !peer.features.contains(features) { filtered_out_node_count += 1; return false; } if peer.is_offline() { connect_ineligable_count += 1; return false; } let is_excluded = excluded_peers.contains(&peer.node_id); if is_excluded { excluded_count += 1; return false; } true }) .sort_by(PeerQuerySortBy::DistanceFrom(&node_id)) .limit(n); let peers = peer_manager.perform_query(query).await?; let total_excluded = banned_count + connect_ineligable_count + excluded_count + filtered_out_node_count; if total_excluded > 0 { debug!( target: LOG_TARGET, "👨‍👧‍👦 Closest Peer Selection: {num_peers} peer(s) selected, {total} peer(s) not selected, {banned} \ banned, {filtered_out} not communication node, {not_connectable} are not connectable, {excluded} \ explicitly excluded", num_peers = peers.len(), total = total_excluded, banned = banned_count, filtered_out = filtered_out_node_count, not_connectable = connect_ineligable_count, excluded = excluded_count ); } Ok(peers.into_iter().map(|p| p.node_id).collect()) } } #[cfg(test)] mod test { use super::*; use crate::{ broadcast_strategy::BroadcastClosestRequest, envelope::NodeDestination, test_utils::{build_peer_manager, make_client_identity, make_node_identity}, }; use chrono::{DateTime, Utc}; use 
tari_comms::test_utils::mocks::{create_connectivity_mock, create_peer_connection_mock_pair}; use tari_shutdown::Shutdown; use tari_test_utils::random; async fn db_connection() -> DbConnection { let conn = DbConnection::connect_memory(random::string(8)).await.unwrap(); conn.migrate().await.unwrap(); conn } #[tokio_macros::test_basic] async fn send_join_request() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let (out_tx, mut out_rx) = mpsc::channel(1); let (connectivity_manager, mock) = create_connectivity_mock(); mock.spawn(); let (actor_tx, actor_rx) = mpsc::channel(1); let mut requester = DhtRequester::new(actor_tx); let outbound_requester = OutboundMessageRequester::new(out_tx); let shutdown = Shutdown::new(); let actor = DhtActor::new( Default::default(), db_connection().await, node_identity, peer_manager, connectivity_manager, outbound_requester, actor_rx, shutdown.to_signal(), ); actor.spawn(); requester.send_join().await.unwrap(); let (params, _) = unwrap_oms_send_msg!(out_rx.next().await.unwrap()); assert_eq!(params.dht_message_type, DhtMessageType::Join); } #[tokio_macros::test_basic] async fn insert_message_signature() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let (connectivity_manager, mock) = create_connectivity_mock(); mock.spawn(); let (out_tx, _) = mpsc::channel(1); let (actor_tx, actor_rx) = mpsc::channel(1); let mut requester = DhtRequester::new(actor_tx); let outbound_requester = OutboundMessageRequester::new(out_tx); let shutdown = Shutdown::new(); let actor = DhtActor::new( Default::default(), db_connection().await, node_identity, peer_manager, connectivity_manager, outbound_requester, actor_rx, shutdown.to_signal(), ); actor.spawn(); let signature = vec![1u8, 2, 3]; let is_dup = requester.insert_message_hash(signature.clone()).await.unwrap(); assert!(!is_dup); let is_dup = requester.insert_message_hash(signature).await.unwrap(); assert!(is_dup); let is_dup = 
requester.insert_message_hash(Vec::new()).await.unwrap(); assert!(!is_dup); } #[tokio_macros::test_basic] async fn select_peers() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let client_node_identity = make_client_identity(); peer_manager.add_peer(client_node_identity.to_peer()).await.unwrap(); let (connectivity_manager, mock) = create_connectivity_mock(); let connectivity_manager_mock_state = mock.get_shared_state(); mock.spawn(); let (conn_in, _, conn_out, _) = create_peer_connection_mock_pair(1, client_node_identity.to_peer(), node_identity.to_peer()).await; connectivity_manager_mock_state.add_active_connection(conn_in).await; peer_manager.add_peer(make_node_identity().to_peer()).await.unwrap(); let (out_tx, _) = mpsc::channel(1); let (actor_tx, actor_rx) = mpsc::channel(1); let mut requester = DhtRequester::new(actor_tx); let outbound_requester = OutboundMessageRequester::new(out_tx); let shutdown = Shutdown::new(); let actor = DhtActor::new( Default::default(), db_connection().await, Arc::clone(&node_identity), peer_manager, connectivity_manager, outbound_requester, actor_rx, shutdown.to_signal(), ); actor.spawn(); let peers = requester .select_peers(BroadcastStrategy::Broadcast(Vec::new())) .await .unwrap(); assert_eq!(peers.len(), 0); connectivity_manager_mock_state .set_selected_connections(vec![conn_out.clone()]) .await; let peers = requester .select_peers(BroadcastStrategy::Broadcast(Vec::new())) .await .unwrap(); assert_eq!(peers.len(), 1); let peers = requester .select_peers(BroadcastStrategy::Propagate(NodeDestination::Unknown, Vec::new())) .await .unwrap(); assert_eq!(peers.len(), 1); let peers = requester .select_peers(BroadcastStrategy::Propagate( conn_out.peer_node_id().clone().into(), Vec::new(), )) .await .unwrap(); assert_eq!(peers.len(), 1); let send_request = Box::new(BroadcastClosestRequest { node_id: node_identity.node_id().clone(), excluded_peers: vec![], connected_only: false, }); let peers = 
requester .select_peers(BroadcastStrategy::Closest(send_request)) .await .unwrap(); assert_eq!(peers.len(), 2); let peers = requester .select_peers(BroadcastStrategy::DirectNodeId(Box::new( client_node_identity.node_id().clone(), ))) .await .unwrap(); assert_eq!(peers.len(), 1); } #[tokio_macros::test_basic] async fn get_and_set_metadata() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let (out_tx, _out_rx) = mpsc::channel(1); let (actor_tx, actor_rx) = mpsc::channel(1); let (connectivity_manager, mock) = create_connectivity_mock(); mock.spawn(); let mut requester = DhtRequester::new(actor_tx); let outbound_requester = OutboundMessageRequester::new(out_tx); let mut shutdown = Shutdown::new(); let actor = DhtActor::new( Default::default(), db_connection().await, node_identity, peer_manager, connectivity_manager, outbound_requester, actor_rx, shutdown.to_signal(), ); actor.spawn(); assert!(requester .get_metadata::<DateTime<Utc>>(DhtMetadataKey::OfflineTimestamp) .await .unwrap() .is_none()); let ts = Utc::now(); requester .set_metadata(DhtMetadataKey::OfflineTimestamp, ts) .await .unwrap(); let got_ts = requester .get_metadata::<DateTime<Utc>>(DhtMetadataKey::OfflineTimestamp) .await .unwrap() .unwrap(); assert_eq!(got_ts, ts); // Check upsert let ts = Utc::now().checked_add_signed(chrono::Duration::seconds(123)).unwrap(); requester .set_metadata(DhtMetadataKey::OfflineTimestamp, ts) .await .unwrap(); let got_ts = requester .get_metadata::<DateTime<Utc>>(DhtMetadataKey::OfflineTimestamp) .await .unwrap() .unwrap(); assert_eq!(got_ts, ts); shutdown.trigger().unwrap(); } }
39.215029
120
0.545503
4bb6f9a61c95bf9f473401f66a154282d31bd78a
3,182
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] include!(concat!(env!("OUT_DIR"), "/fdkaac.rs")); #[cfg(test)] mod test { use super::*; use std::ffi::CStr; #[derive(Debug, PartialEq, Eq)] pub struct Version { pub title: String, pub build_date: String, pub build_time: String, pub module_id: u32, pub version: i32, pub flags: u32, pub version_string: String, } #[test] fn test_encoder_version() { unsafe { let mut encoder_info: LIB_INFO = LIB_INFO::default(); assert_eq!(aacEncGetLibInfo(&mut encoder_info as *mut LIB_INFO), AACENC_OK); let encoder_version = Version { title: CStr::from_ptr(encoder_info.title).to_str().unwrap().to_string(), build_date: CStr::from_ptr(encoder_info.build_date).to_str().unwrap().to_string(), build_time: CStr::from_ptr(encoder_info.build_time).to_str().unwrap().to_string(), module_id: encoder_info.module_id, version: encoder_info.version, flags: encoder_info.flags, version_string: CStr::from_ptr(&encoder_info.versionStr as *const i8) .to_str().unwrap().to_string(), }; assert_eq!(encoder_version, Version { title: "FDK Tools".to_string(), build_date: "Nov 6 2018".to_string(), build_time: "12:14:52".to_string(), module_id: 1, version: 33752576, flags: 0, version_string: "2.3.6".to_string() }); } } #[test] fn test_decoder_version() { unsafe { let mut decoder_info: LIB_INFO = LIB_INFO::default(); assert_eq!(aacDecoder_GetLibInfo(&mut decoder_info as *mut LIB_INFO), AAC_DEC_OK as i32); let decoder_version = Version { title: CStr::from_ptr(decoder_info.title).to_str().unwrap().to_string(), build_date: CStr::from_ptr(decoder_info.build_date).to_str().unwrap().to_string(), build_time: CStr::from_ptr(decoder_info.build_time).to_str().unwrap().to_string(), module_id: decoder_info.module_id, version: decoder_info.version, flags: decoder_info.flags, version_string: CStr::from_ptr(&decoder_info.versionStr as *const i8).to_str().unwrap().to_string(), }; assert_eq!(decoder_version, Version { title: "SBR Decoder".to_string(), 
build_date: "Nov 6 2018".to_string(), build_time: "12:13:32".to_string(), module_id: 5, version: 33688576, flags: 63, version_string: "2.2.12".to_string() }); } } }
38.337349
116
0.509114
28fb95f165fde509536b9acc669a5e1548a0ee1d
55,157
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A Folder represents an AST->AST fold; it accepts an AST piece, //! and returns a piece of the same type. So, for instance, macro //! expansion is a Folder that walks over an AST and produces another //! AST. //! //! Note: using a Folder (other than the MacroExpander Folder) on //! an AST before macro expansion is probably a bad idea. For instance, //! a folder renaming item names in a module will miss all of those //! that are created by the expansion of a macro. use ast::*; use ast; use syntax_pos::Span; use codemap::{Spanned, respan}; use parse::token::{self, Token}; use ptr::P; use symbol::keywords; use tokenstream::*; use util::small_vector::SmallVector; use util::move_map::MoveMap; use rustc_data_structures::sync::Lrc; pub trait Folder : Sized { // Any additions to this trait should happen in form // of a call to a public `noop_*` function that only calls // out to the folder again, not other `noop_*` functions. // // This is a necessary API workaround to the problem of not // being able to call out to the super default method // in an overridden default method. 
fn fold_crate(&mut self, c: Crate) -> Crate { noop_fold_crate(c, self) } fn fold_meta_items(&mut self, meta_items: Vec<MetaItem>) -> Vec<MetaItem> { noop_fold_meta_items(meta_items, self) } fn fold_meta_list_item(&mut self, list_item: NestedMetaItem) -> NestedMetaItem { noop_fold_meta_list_item(list_item, self) } fn fold_meta_item(&mut self, meta_item: MetaItem) -> MetaItem { noop_fold_meta_item(meta_item, self) } fn fold_use_tree(&mut self, use_tree: UseTree) -> UseTree { noop_fold_use_tree(use_tree, self) } fn fold_foreign_item(&mut self, ni: ForeignItem) -> SmallVector<ForeignItem> { noop_fold_foreign_item(ni, self) } fn fold_foreign_item_simple(&mut self, ni: ForeignItem) -> ForeignItem { noop_fold_foreign_item_simple(ni, self) } fn fold_item(&mut self, i: P<Item>) -> SmallVector<P<Item>> { noop_fold_item(i, self) } fn fold_item_simple(&mut self, i: Item) -> Item { noop_fold_item_simple(i, self) } fn fold_struct_field(&mut self, sf: StructField) -> StructField { noop_fold_struct_field(sf, self) } fn fold_item_kind(&mut self, i: ItemKind) -> ItemKind { noop_fold_item_kind(i, self) } fn fold_trait_item(&mut self, i: TraitItem) -> SmallVector<TraitItem> { noop_fold_trait_item(i, self) } fn fold_impl_item(&mut self, i: ImplItem) -> SmallVector<ImplItem> { noop_fold_impl_item(i, self) } fn fold_fn_decl(&mut self, d: P<FnDecl>) -> P<FnDecl> { noop_fold_fn_decl(d, self) } fn fold_block(&mut self, b: P<Block>) -> P<Block> { noop_fold_block(b, self) } fn fold_stmt(&mut self, s: Stmt) -> SmallVector<Stmt> { noop_fold_stmt(s, self) } fn fold_arm(&mut self, a: Arm) -> Arm { noop_fold_arm(a, self) } fn fold_pat(&mut self, p: P<Pat>) -> P<Pat> { noop_fold_pat(p, self) } fn fold_anon_const(&mut self, c: AnonConst) -> AnonConst { noop_fold_anon_const(c, self) } fn fold_expr(&mut self, e: P<Expr>) -> P<Expr> { e.map(|e| noop_fold_expr(e, self)) } fn fold_range_end(&mut self, re: RangeEnd) -> RangeEnd { noop_fold_range_end(re, self) } fn fold_opt_expr(&mut self, e: P<Expr>) -> 
Option<P<Expr>> { noop_fold_opt_expr(e, self) } fn fold_exprs(&mut self, es: Vec<P<Expr>>) -> Vec<P<Expr>> { noop_fold_exprs(es, self) } fn fold_ty(&mut self, t: P<Ty>) -> P<Ty> { noop_fold_ty(t, self) } fn fold_ty_binding(&mut self, t: TypeBinding) -> TypeBinding { noop_fold_ty_binding(t, self) } fn fold_mod(&mut self, m: Mod) -> Mod { noop_fold_mod(m, self) } fn fold_foreign_mod(&mut self, nm: ForeignMod) -> ForeignMod { noop_fold_foreign_mod(nm, self) } fn fold_global_asm(&mut self, ga: P<GlobalAsm>) -> P<GlobalAsm> { noop_fold_global_asm(ga, self) } fn fold_variant(&mut self, v: Variant) -> Variant { noop_fold_variant(v, self) } fn fold_ident(&mut self, i: Ident) -> Ident { noop_fold_ident(i, self) } fn fold_usize(&mut self, i: usize) -> usize { noop_fold_usize(i, self) } fn fold_path(&mut self, p: Path) -> Path { noop_fold_path(p, self) } fn fold_path_parameters(&mut self, p: PathParameters) -> PathParameters { noop_fold_path_parameters(p, self) } fn fold_angle_bracketed_parameter_data(&mut self, p: AngleBracketedParameterData) -> AngleBracketedParameterData { noop_fold_angle_bracketed_parameter_data(p, self) } fn fold_parenthesized_parameter_data(&mut self, p: ParenthesizedParameterData) -> ParenthesizedParameterData { noop_fold_parenthesized_parameter_data(p, self) } fn fold_local(&mut self, l: P<Local>) -> P<Local> { noop_fold_local(l, self) } fn fold_mac(&mut self, _mac: Mac) -> Mac { panic!("fold_mac disabled by default"); // NB: see note about macros above. 
// if you really want a folder that // works on macros, use this // definition in your trait impl: // fold::noop_fold_mac(_mac, self) } fn fold_macro_def(&mut self, def: MacroDef) -> MacroDef { noop_fold_macro_def(def, self) } fn fold_label(&mut self, label: Label) -> Label { noop_fold_label(label, self) } fn fold_lifetime(&mut self, l: Lifetime) -> Lifetime { noop_fold_lifetime(l, self) } fn fold_lifetime_def(&mut self, l: LifetimeDef) -> LifetimeDef { noop_fold_lifetime_def(l, self) } fn fold_attribute(&mut self, at: Attribute) -> Option<Attribute> { noop_fold_attribute(at, self) } fn fold_arg(&mut self, a: Arg) -> Arg { noop_fold_arg(a, self) } fn fold_generics(&mut self, generics: Generics) -> Generics { noop_fold_generics(generics, self) } fn fold_trait_ref(&mut self, p: TraitRef) -> TraitRef { noop_fold_trait_ref(p, self) } fn fold_poly_trait_ref(&mut self, p: PolyTraitRef) -> PolyTraitRef { noop_fold_poly_trait_ref(p, self) } fn fold_variant_data(&mut self, vdata: VariantData) -> VariantData { noop_fold_variant_data(vdata, self) } fn fold_lifetimes(&mut self, lts: Vec<Lifetime>) -> Vec<Lifetime> { noop_fold_lifetimes(lts, self) } fn fold_lifetime_defs(&mut self, lts: Vec<LifetimeDef>) -> Vec<LifetimeDef> { noop_fold_lifetime_defs(lts, self) } fn fold_ty_param(&mut self, tp: TyParam) -> TyParam { noop_fold_ty_param(tp, self) } fn fold_generic_param(&mut self, param: GenericParam) -> GenericParam { noop_fold_generic_param(param, self) } fn fold_generic_params(&mut self, params: Vec<GenericParam>) -> Vec<GenericParam> { noop_fold_generic_params(params, self) } fn fold_tt(&mut self, tt: TokenTree) -> TokenTree { noop_fold_tt(tt, self) } fn fold_tts(&mut self, tts: TokenStream) -> TokenStream { noop_fold_tts(tts, self) } fn fold_token(&mut self, t: token::Token) -> token::Token { noop_fold_token(t, self) } fn fold_interpolated(&mut self, nt: token::Nonterminal) -> token::Nonterminal { noop_fold_interpolated(nt, self) } fn fold_opt_lifetime(&mut self, o_lt: 
Option<Lifetime>) -> Option<Lifetime> { noop_fold_opt_lifetime(o_lt, self) } fn fold_opt_bounds(&mut self, b: Option<TyParamBounds>) -> Option<TyParamBounds> { noop_fold_opt_bounds(b, self) } fn fold_bounds(&mut self, b: TyParamBounds) -> TyParamBounds { noop_fold_bounds(b, self) } fn fold_ty_param_bound(&mut self, tpb: TyParamBound) -> TyParamBound { noop_fold_ty_param_bound(tpb, self) } fn fold_mt(&mut self, mt: MutTy) -> MutTy { noop_fold_mt(mt, self) } fn fold_field(&mut self, field: Field) -> Field { noop_fold_field(field, self) } fn fold_where_clause(&mut self, where_clause: WhereClause) -> WhereClause { noop_fold_where_clause(where_clause, self) } fn fold_where_predicate(&mut self, where_predicate: WherePredicate) -> WherePredicate { noop_fold_where_predicate(where_predicate, self) } fn fold_vis(&mut self, vis: Visibility) -> Visibility { noop_fold_vis(vis, self) } fn new_id(&mut self, i: NodeId) -> NodeId { i } fn new_span(&mut self, sp: Span) -> Span { sp } } pub fn noop_fold_meta_items<T: Folder>(meta_items: Vec<MetaItem>, fld: &mut T) -> Vec<MetaItem> { meta_items.move_map(|x| fld.fold_meta_item(x)) } pub fn noop_fold_use_tree<T: Folder>(use_tree: UseTree, fld: &mut T) -> UseTree { UseTree { span: fld.new_span(use_tree.span), prefix: fld.fold_path(use_tree.prefix), kind: match use_tree.kind { UseTreeKind::Simple(rename) => UseTreeKind::Simple(rename.map(|ident| fld.fold_ident(ident))), UseTreeKind::Glob => UseTreeKind::Glob, UseTreeKind::Nested(items) => UseTreeKind::Nested(items.move_map(|(tree, id)| { (fld.fold_use_tree(tree), fld.new_id(id)) })), }, } } pub fn fold_attrs<T: Folder>(attrs: Vec<Attribute>, fld: &mut T) -> Vec<Attribute> { attrs.move_flat_map(|x| fld.fold_attribute(x)) } pub fn fold_thin_attrs<T: Folder>(attrs: ThinVec<Attribute>, fld: &mut T) -> ThinVec<Attribute> { fold_attrs(attrs.into(), fld).into() } pub fn noop_fold_arm<T: Folder>(Arm {attrs, pats, guard, body}: Arm, fld: &mut T) -> Arm { Arm { attrs: fold_attrs(attrs, fld), pats: 
pats.move_map(|x| fld.fold_pat(x)), guard: guard.map(|x| fld.fold_expr(x)), body: fld.fold_expr(body), } } pub fn noop_fold_ty_binding<T: Folder>(b: TypeBinding, fld: &mut T) -> TypeBinding { TypeBinding { id: fld.new_id(b.id), ident: fld.fold_ident(b.ident), ty: fld.fold_ty(b.ty), span: fld.new_span(b.span), } } pub fn noop_fold_ty<T: Folder>(t: P<Ty>, fld: &mut T) -> P<Ty> { t.map(|Ty {id, node, span}| Ty { id: fld.new_id(id), node: match node { TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => node, TyKind::Slice(ty) => TyKind::Slice(fld.fold_ty(ty)), TyKind::Ptr(mt) => TyKind::Ptr(fld.fold_mt(mt)), TyKind::Rptr(region, mt) => { TyKind::Rptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt)) } TyKind::BareFn(f) => { TyKind::BareFn(f.map(|BareFnTy {generic_params, unsafety, abi, decl}| BareFnTy { generic_params: fld.fold_generic_params(generic_params), unsafety, abi, decl: fld.fold_fn_decl(decl) })) } TyKind::Never => node, TyKind::Tup(tys) => TyKind::Tup(tys.move_map(|ty| fld.fold_ty(ty))), TyKind::Paren(ty) => TyKind::Paren(fld.fold_ty(ty)), TyKind::Path(qself, path) => { let qself = qself.map(|QSelf { ty, position }| { QSelf { ty: fld.fold_ty(ty), position, } }); TyKind::Path(qself, fld.fold_path(path)) } TyKind::Array(ty, length) => { TyKind::Array(fld.fold_ty(ty), fld.fold_anon_const(length)) } TyKind::Typeof(expr) => { TyKind::Typeof(fld.fold_anon_const(expr)) } TyKind::TraitObject(bounds, syntax) => { TyKind::TraitObject(bounds.move_map(|b| fld.fold_ty_param_bound(b)), syntax) } TyKind::ImplTrait(bounds) => { TyKind::ImplTrait(bounds.move_map(|b| fld.fold_ty_param_bound(b))) } TyKind::Mac(mac) => { TyKind::Mac(fld.fold_mac(mac)) } }, span: fld.new_span(span) }) } pub fn noop_fold_foreign_mod<T: Folder>(ForeignMod {abi, items}: ForeignMod, fld: &mut T) -> ForeignMod { ForeignMod { abi, items: items.move_flat_map(|x| fld.fold_foreign_item(x)), } } pub fn noop_fold_global_asm<T: Folder>(ga: P<GlobalAsm>, _: &mut T) -> P<GlobalAsm> { ga } pub fn 
noop_fold_variant<T: Folder>(v: Variant, fld: &mut T) -> Variant { Spanned { node: Variant_ { ident: fld.fold_ident(v.node.ident), attrs: fold_attrs(v.node.attrs, fld), data: fld.fold_variant_data(v.node.data), disr_expr: v.node.disr_expr.map(|e| fld.fold_anon_const(e)), }, span: fld.new_span(v.span), } } pub fn noop_fold_ident<T: Folder>(ident: Ident, fld: &mut T) -> Ident { Ident::new(ident.name, fld.new_span(ident.span)) } pub fn noop_fold_usize<T: Folder>(i: usize, _: &mut T) -> usize { i } pub fn noop_fold_path<T: Folder>(Path { segments, span }: Path, fld: &mut T) -> Path { Path { segments: segments.move_map(|PathSegment {ident, parameters}| PathSegment { ident: fld.fold_ident(ident), parameters: parameters.map(|ps| ps.map(|ps| fld.fold_path_parameters(ps))), }), span: fld.new_span(span) } } pub fn noop_fold_path_parameters<T: Folder>(path_parameters: PathParameters, fld: &mut T) -> PathParameters { match path_parameters { PathParameters::AngleBracketed(data) => PathParameters::AngleBracketed(fld.fold_angle_bracketed_parameter_data(data)), PathParameters::Parenthesized(data) => PathParameters::Parenthesized(fld.fold_parenthesized_parameter_data(data)), } } pub fn noop_fold_angle_bracketed_parameter_data<T: Folder>(data: AngleBracketedParameterData, fld: &mut T) -> AngleBracketedParameterData { let AngleBracketedParameterData { lifetimes, types, bindings, span } = data; AngleBracketedParameterData { lifetimes: fld.fold_lifetimes(lifetimes), types: types.move_map(|ty| fld.fold_ty(ty)), bindings: bindings.move_map(|b| fld.fold_ty_binding(b)), span: fld.new_span(span) } } pub fn noop_fold_parenthesized_parameter_data<T: Folder>(data: ParenthesizedParameterData, fld: &mut T) -> ParenthesizedParameterData { let ParenthesizedParameterData { inputs, output, span } = data; ParenthesizedParameterData { inputs: inputs.move_map(|ty| fld.fold_ty(ty)), output: output.map(|ty| fld.fold_ty(ty)), span: fld.new_span(span) } } pub fn noop_fold_local<T: Folder>(l: P<Local>, fld: 
&mut T) -> P<Local> { l.map(|Local {id, pat, ty, init, span, attrs}| Local { id: fld.new_id(id), pat: fld.fold_pat(pat), ty: ty.map(|t| fld.fold_ty(t)), init: init.map(|e| fld.fold_expr(e)), span: fld.new_span(span), attrs: fold_attrs(attrs.into(), fld).into(), }) } pub fn noop_fold_attribute<T: Folder>(attr: Attribute, fld: &mut T) -> Option<Attribute> { Some(Attribute { id: attr.id, style: attr.style, path: fld.fold_path(attr.path), tokens: fld.fold_tts(attr.tokens), is_sugared_doc: attr.is_sugared_doc, span: fld.new_span(attr.span), }) } pub fn noop_fold_mac<T: Folder>(Spanned {node, span}: Mac, fld: &mut T) -> Mac { Spanned { node: Mac_ { tts: fld.fold_tts(node.stream()).into(), path: fld.fold_path(node.path), }, span: fld.new_span(span) } } pub fn noop_fold_macro_def<T: Folder>(def: MacroDef, fld: &mut T) -> MacroDef { MacroDef { tokens: fld.fold_tts(def.tokens.into()).into(), legacy: def.legacy, } } pub fn noop_fold_meta_list_item<T: Folder>(li: NestedMetaItem, fld: &mut T) -> NestedMetaItem { Spanned { node: match li.node { NestedMetaItemKind::MetaItem(mi) => { NestedMetaItemKind::MetaItem(fld.fold_meta_item(mi)) }, NestedMetaItemKind::Literal(lit) => NestedMetaItemKind::Literal(lit) }, span: fld.new_span(li.span) } } pub fn noop_fold_meta_item<T: Folder>(mi: MetaItem, fld: &mut T) -> MetaItem { MetaItem { ident: mi.ident, node: match mi.node { MetaItemKind::Word => MetaItemKind::Word, MetaItemKind::List(mis) => { MetaItemKind::List(mis.move_map(|e| fld.fold_meta_list_item(e))) }, MetaItemKind::NameValue(s) => MetaItemKind::NameValue(s), }, span: fld.new_span(mi.span) } } pub fn noop_fold_arg<T: Folder>(Arg {id, pat, ty}: Arg, fld: &mut T) -> Arg { Arg { id: fld.new_id(id), pat: fld.fold_pat(pat), ty: fld.fold_ty(ty) } } pub fn noop_fold_tt<T: Folder>(tt: TokenTree, fld: &mut T) -> TokenTree { match tt { TokenTree::Token(span, tok) => TokenTree::Token(fld.new_span(span), fld.fold_token(tok)), TokenTree::Delimited(span, delimed) => 
TokenTree::Delimited(fld.new_span(span), Delimited { tts: fld.fold_tts(delimed.stream()).into(), delim: delimed.delim, }), } } pub fn noop_fold_tts<T: Folder>(tts: TokenStream, fld: &mut T) -> TokenStream { tts.map(|tt| fld.fold_tt(tt)) } // apply ident folder if it's an ident, apply other folds to interpolated nodes pub fn noop_fold_token<T: Folder>(t: token::Token, fld: &mut T) -> token::Token { match t { token::Ident(id, is_raw) => token::Ident(fld.fold_ident(id), is_raw), token::Lifetime(id) => token::Lifetime(fld.fold_ident(id)), token::Interpolated(nt) => { let nt = match Lrc::try_unwrap(nt) { Ok(nt) => nt, Err(nt) => (*nt).clone(), }; Token::interpolated(fld.fold_interpolated(nt.0)) } _ => t } } /// apply folder to elements of interpolated nodes // // NB: this can occur only when applying a fold to partially expanded code, where // parsed pieces have gotten implanted ito *other* macro invocations. This is relevant // for macro hygiene, but possibly not elsewhere. // // One problem here occurs because the types for fold_item, fold_stmt, etc. allow the // folder to return *multiple* items; this is a problem for the nodes here, because // they insist on having exactly one piece. One solution would be to mangle the fold // trait to include one-to-many and one-to-one versions of these entry points, but that // would probably confuse a lot of people and help very few. Instead, I'm just going // to put in dynamic checks. I think the performance impact of this will be pretty much // nonexistent. The danger is that someone will apply a fold to a partially expanded // node, and will be confused by the fact that their "fold_item" or "fold_stmt" isn't // getting called on NtItem or NtStmt nodes. Hopefully they'll wind up reading this // comment, and doing something appropriate. 
// // BTW, design choice: I considered just changing the type of, e.g., NtItem to contain // multiple items, but decided against it when I looked at parse_item_or_view_item and // tried to figure out what I would do with multiple items there.... pub fn noop_fold_interpolated<T: Folder>(nt: token::Nonterminal, fld: &mut T) -> token::Nonterminal { match nt { token::NtItem(item) => token::NtItem(fld.fold_item(item) // this is probably okay, because the only folds likely // to peek inside interpolated nodes will be renamings/markings, // which map single items to single items .expect_one("expected fold to produce exactly one item")), token::NtBlock(block) => token::NtBlock(fld.fold_block(block)), token::NtStmt(stmt) => token::NtStmt(fld.fold_stmt(stmt) // this is probably okay, because the only folds likely // to peek inside interpolated nodes will be renamings/markings, // which map single items to single items .expect_one("expected fold to produce exactly one statement")), token::NtPat(pat) => token::NtPat(fld.fold_pat(pat)), token::NtExpr(expr) => token::NtExpr(fld.fold_expr(expr)), token::NtTy(ty) => token::NtTy(fld.fold_ty(ty)), token::NtIdent(ident, is_raw) => token::NtIdent(fld.fold_ident(ident), is_raw), token::NtLifetime(ident) => token::NtLifetime(fld.fold_ident(ident)), token::NtLiteral(expr) => token::NtLiteral(fld.fold_expr(expr)), token::NtMeta(meta) => token::NtMeta(fld.fold_meta_item(meta)), token::NtPath(path) => token::NtPath(fld.fold_path(path)), token::NtTT(tt) => token::NtTT(fld.fold_tt(tt)), token::NtArm(arm) => token::NtArm(fld.fold_arm(arm)), token::NtImplItem(item) => token::NtImplItem(fld.fold_impl_item(item) .expect_one("expected fold to produce exactly one item")), token::NtTraitItem(item) => token::NtTraitItem(fld.fold_trait_item(item) .expect_one("expected fold to produce exactly one item")), token::NtGenerics(generics) => token::NtGenerics(fld.fold_generics(generics)), token::NtWhereClause(where_clause) => 
token::NtWhereClause(fld.fold_where_clause(where_clause)), token::NtArg(arg) => token::NtArg(fld.fold_arg(arg)), token::NtVis(vis) => token::NtVis(fld.fold_vis(vis)), token::NtForeignItem(ni) => token::NtForeignItem(fld.fold_foreign_item(ni) // see reasoning above .expect_one("expected fold to produce exactly one item")), } } pub fn noop_fold_fn_decl<T: Folder>(decl: P<FnDecl>, fld: &mut T) -> P<FnDecl> { decl.map(|FnDecl {inputs, output, variadic}| FnDecl { inputs: inputs.move_map(|x| fld.fold_arg(x)), output: match output { FunctionRetTy::Ty(ty) => FunctionRetTy::Ty(fld.fold_ty(ty)), FunctionRetTy::Default(span) => FunctionRetTy::Default(fld.new_span(span)), }, variadic, }) } pub fn noop_fold_ty_param_bound<T>(tpb: TyParamBound, fld: &mut T) -> TyParamBound where T: Folder { match tpb { TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier), RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)), } } pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam { let TyParam {attrs, id, ident, bounds, default} = tp; let attrs: Vec<_> = attrs.into(); TyParam { attrs: attrs.into_iter() .flat_map(|x| fld.fold_attribute(x).into_iter()) .collect::<Vec<_>>() .into(), id: fld.new_id(id), ident: fld.fold_ident(ident), bounds: fld.fold_bounds(bounds), default: default.map(|x| fld.fold_ty(x)), } } pub fn noop_fold_generic_param<T: Folder>(param: GenericParam, fld: &mut T) -> GenericParam { match param { GenericParam::Lifetime(l) => GenericParam::Lifetime(fld.fold_lifetime_def(l)), GenericParam::Type(t) => GenericParam::Type(fld.fold_ty_param(t)), } } pub fn noop_fold_generic_params<T: Folder>( params: Vec<GenericParam>, fld: &mut T ) -> Vec<GenericParam> { params.move_map(|p| fld.fold_generic_param(p)) } pub fn noop_fold_label<T: Folder>(label: Label, fld: &mut T) -> Label { Label { ident: fld.fold_ident(label.ident), } } pub fn noop_fold_lifetime<T: Folder>(l: Lifetime, fld: &mut T) -> Lifetime { 
Lifetime { id: fld.new_id(l.id), ident: fld.fold_ident(l.ident), } } pub fn noop_fold_lifetime_def<T: Folder>(l: LifetimeDef, fld: &mut T) -> LifetimeDef { let attrs: Vec<_> = l.attrs.into(); LifetimeDef { attrs: attrs.into_iter() .flat_map(|x| fld.fold_attribute(x).into_iter()) .collect::<Vec<_>>() .into(), lifetime: fld.fold_lifetime(l.lifetime), bounds: fld.fold_lifetimes(l.bounds), } } pub fn noop_fold_lifetimes<T: Folder>(lts: Vec<Lifetime>, fld: &mut T) -> Vec<Lifetime> { lts.move_map(|l| fld.fold_lifetime(l)) } pub fn noop_fold_lifetime_defs<T: Folder>(lts: Vec<LifetimeDef>, fld: &mut T) -> Vec<LifetimeDef> { lts.move_map(|l| fld.fold_lifetime_def(l)) } pub fn noop_fold_opt_lifetime<T: Folder>(o_lt: Option<Lifetime>, fld: &mut T) -> Option<Lifetime> { o_lt.map(|lt| fld.fold_lifetime(lt)) } pub fn noop_fold_generics<T: Folder>(Generics { params, where_clause, span }: Generics, fld: &mut T) -> Generics { Generics { params: fld.fold_generic_params(params), where_clause: fld.fold_where_clause(where_clause), span: fld.new_span(span), } } pub fn noop_fold_where_clause<T: Folder>( WhereClause {id, predicates, span}: WhereClause, fld: &mut T) -> WhereClause { WhereClause { id: fld.new_id(id), predicates: predicates.move_map(|predicate| { fld.fold_where_predicate(predicate) }), span, } } pub fn noop_fold_where_predicate<T: Folder>( pred: WherePredicate, fld: &mut T) -> WherePredicate { match pred { ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate{bound_generic_params, bounded_ty, bounds, span}) => { ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate { bound_generic_params: fld.fold_generic_params(bound_generic_params), bounded_ty: fld.fold_ty(bounded_ty), bounds: bounds.move_map(|x| fld.fold_ty_param_bound(x)), span: fld.new_span(span) }) } ast::WherePredicate::RegionPredicate(ast::WhereRegionPredicate{lifetime, bounds, span}) => { ast::WherePredicate::RegionPredicate(ast::WhereRegionPredicate { span: fld.new_span(span), lifetime: 
fld.fold_lifetime(lifetime), bounds: bounds.move_map(|bound| fld.fold_lifetime(bound)) }) } ast::WherePredicate::EqPredicate(ast::WhereEqPredicate{id, lhs_ty, rhs_ty, span}) => { ast::WherePredicate::EqPredicate(ast::WhereEqPredicate{ id: fld.new_id(id), lhs_ty: fld.fold_ty(lhs_ty), rhs_ty: fld.fold_ty(rhs_ty), span: fld.new_span(span) }) } } } pub fn noop_fold_variant_data<T: Folder>(vdata: VariantData, fld: &mut T) -> VariantData { match vdata { ast::VariantData::Struct(fields, id) => { ast::VariantData::Struct(fields.move_map(|f| fld.fold_struct_field(f)), fld.new_id(id)) } ast::VariantData::Tuple(fields, id) => { ast::VariantData::Tuple(fields.move_map(|f| fld.fold_struct_field(f)), fld.new_id(id)) } ast::VariantData::Unit(id) => ast::VariantData::Unit(fld.new_id(id)) } } pub fn noop_fold_trait_ref<T: Folder>(p: TraitRef, fld: &mut T) -> TraitRef { let id = fld.new_id(p.ref_id); let TraitRef { path, ref_id: _, } = p; ast::TraitRef { path: fld.fold_path(path), ref_id: id, } } pub fn noop_fold_poly_trait_ref<T: Folder>(p: PolyTraitRef, fld: &mut T) -> PolyTraitRef { ast::PolyTraitRef { bound_generic_params: fld.fold_generic_params(p.bound_generic_params), trait_ref: fld.fold_trait_ref(p.trait_ref), span: fld.new_span(p.span), } } pub fn noop_fold_struct_field<T: Folder>(f: StructField, fld: &mut T) -> StructField { StructField { span: fld.new_span(f.span), id: fld.new_id(f.id), ident: f.ident.map(|ident| fld.fold_ident(ident)), vis: fld.fold_vis(f.vis), ty: fld.fold_ty(f.ty), attrs: fold_attrs(f.attrs, fld), } } pub fn noop_fold_field<T: Folder>(f: Field, folder: &mut T) -> Field { Field { ident: folder.fold_ident(f.ident), expr: folder.fold_expr(f.expr), span: folder.new_span(f.span), is_shorthand: f.is_shorthand, attrs: fold_thin_attrs(f.attrs, folder), } } pub fn noop_fold_mt<T: Folder>(MutTy {ty, mutbl}: MutTy, folder: &mut T) -> MutTy { MutTy { ty: folder.fold_ty(ty), mutbl, } } pub fn noop_fold_opt_bounds<T: Folder>(b: Option<TyParamBounds>, folder: &mut T) 
-> Option<TyParamBounds> { b.map(|bounds| folder.fold_bounds(bounds)) } fn noop_fold_bounds<T: Folder>(bounds: TyParamBounds, folder: &mut T) -> TyParamBounds { bounds.move_map(|bound| folder.fold_ty_param_bound(bound)) } pub fn noop_fold_block<T: Folder>(b: P<Block>, folder: &mut T) -> P<Block> { b.map(|Block {id, stmts, rules, span, recovered}| Block { id: folder.new_id(id), stmts: stmts.move_flat_map(|s| folder.fold_stmt(s).into_iter()), rules, span: folder.new_span(span), recovered, }) } pub fn noop_fold_item_kind<T: Folder>(i: ItemKind, folder: &mut T) -> ItemKind { match i { ItemKind::ExternCrate(orig_name) => ItemKind::ExternCrate(orig_name), ItemKind::Use(use_tree) => { ItemKind::Use(use_tree.map(|tree| folder.fold_use_tree(tree))) } ItemKind::Static(t, m, e) => { ItemKind::Static(folder.fold_ty(t), m, folder.fold_expr(e)) } ItemKind::Const(t, e) => { ItemKind::Const(folder.fold_ty(t), folder.fold_expr(e)) } ItemKind::Fn(decl, unsafety, constness, abi, generics, body) => { let generics = folder.fold_generics(generics); let decl = folder.fold_fn_decl(decl); let body = folder.fold_block(body); ItemKind::Fn(decl, unsafety, constness, abi, generics, body) } ItemKind::Mod(m) => ItemKind::Mod(folder.fold_mod(m)), ItemKind::ForeignMod(nm) => ItemKind::ForeignMod(folder.fold_foreign_mod(nm)), ItemKind::GlobalAsm(ga) => ItemKind::GlobalAsm(folder.fold_global_asm(ga)), ItemKind::Ty(t, generics) => { ItemKind::Ty(folder.fold_ty(t), folder.fold_generics(generics)) } ItemKind::Enum(enum_definition, generics) => { let generics = folder.fold_generics(generics); let variants = enum_definition.variants.move_map(|x| folder.fold_variant(x)); ItemKind::Enum(ast::EnumDef { variants: variants }, generics) } ItemKind::Struct(struct_def, generics) => { let generics = folder.fold_generics(generics); ItemKind::Struct(folder.fold_variant_data(struct_def), generics) } ItemKind::Union(struct_def, generics) => { let generics = folder.fold_generics(generics); 
ItemKind::Union(folder.fold_variant_data(struct_def), generics) } ItemKind::Impl(unsafety, polarity, defaultness, generics, ifce, ty, impl_items) => ItemKind::Impl( unsafety, polarity, defaultness, folder.fold_generics(generics), ifce.map(|trait_ref| folder.fold_trait_ref(trait_ref.clone())), folder.fold_ty(ty), impl_items.move_flat_map(|item| folder.fold_impl_item(item)), ), ItemKind::Trait(is_auto, unsafety, generics, bounds, items) => ItemKind::Trait( is_auto, unsafety, folder.fold_generics(generics), folder.fold_bounds(bounds), items.move_flat_map(|item| folder.fold_trait_item(item)), ), ItemKind::TraitAlias(generics, bounds) => ItemKind::TraitAlias( folder.fold_generics(generics), folder.fold_bounds(bounds)), ItemKind::Mac(m) => ItemKind::Mac(folder.fold_mac(m)), ItemKind::MacroDef(def) => ItemKind::MacroDef(folder.fold_macro_def(def)), } } pub fn noop_fold_trait_item<T: Folder>(i: TraitItem, folder: &mut T) -> SmallVector<TraitItem> { SmallVector::one(TraitItem { id: folder.new_id(i.id), ident: folder.fold_ident(i.ident), attrs: fold_attrs(i.attrs, folder), generics: folder.fold_generics(i.generics), node: match i.node { TraitItemKind::Const(ty, default) => { TraitItemKind::Const(folder.fold_ty(ty), default.map(|x| folder.fold_expr(x))) } TraitItemKind::Method(sig, body) => { TraitItemKind::Method(noop_fold_method_sig(sig, folder), body.map(|x| folder.fold_block(x))) } TraitItemKind::Type(bounds, default) => { TraitItemKind::Type(folder.fold_bounds(bounds), default.map(|x| folder.fold_ty(x))) } ast::TraitItemKind::Macro(mac) => { TraitItemKind::Macro(folder.fold_mac(mac)) } }, span: folder.new_span(i.span), tokens: i.tokens, }) } pub fn noop_fold_impl_item<T: Folder>(i: ImplItem, folder: &mut T) -> SmallVector<ImplItem> { SmallVector::one(ImplItem { id: folder.new_id(i.id), vis: folder.fold_vis(i.vis), ident: folder.fold_ident(i.ident), attrs: fold_attrs(i.attrs, folder), generics: folder.fold_generics(i.generics), defaultness: i.defaultness, node: match 
i.node { ast::ImplItemKind::Const(ty, expr) => { ast::ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr)) } ast::ImplItemKind::Method(sig, body) => { ast::ImplItemKind::Method(noop_fold_method_sig(sig, folder), folder.fold_block(body)) } ast::ImplItemKind::Type(ty) => ast::ImplItemKind::Type(folder.fold_ty(ty)), ast::ImplItemKind::Macro(mac) => ast::ImplItemKind::Macro(folder.fold_mac(mac)) }, span: folder.new_span(i.span), tokens: i.tokens, }) } pub fn noop_fold_mod<T: Folder>(Mod {inner, items}: Mod, folder: &mut T) -> Mod { Mod { inner: folder.new_span(inner), items: items.move_flat_map(|x| folder.fold_item(x)), } } pub fn noop_fold_crate<T: Folder>(Crate {module, attrs, span}: Crate, folder: &mut T) -> Crate { let mut items = folder.fold_item(P(ast::Item { ident: keywords::Invalid.ident(), attrs, id: ast::DUMMY_NODE_ID, vis: respan(span.shrink_to_lo(), ast::VisibilityKind::Public), span, node: ast::ItemKind::Mod(module), tokens: None, })).into_iter(); let (module, attrs, span) = match items.next() { Some(item) => { assert!(items.next().is_none(), "a crate cannot expand to more than one item"); item.and_then(|ast::Item { attrs, span, node, .. 
}| { match node { ast::ItemKind::Mod(m) => (m, attrs, span), _ => panic!("fold converted a module to not a module"), } }) } None => (ast::Mod { inner: span, items: vec![], }, vec![], span) }; Crate { module, attrs, span, } } // fold one item into possibly many items pub fn noop_fold_item<T: Folder>(i: P<Item>, folder: &mut T) -> SmallVector<P<Item>> { SmallVector::one(i.map(|i| folder.fold_item_simple(i))) } // fold one item into exactly one item pub fn noop_fold_item_simple<T: Folder>(Item {id, ident, attrs, node, vis, span, tokens}: Item, folder: &mut T) -> Item { Item { id: folder.new_id(id), vis: folder.fold_vis(vis), ident: folder.fold_ident(ident), attrs: fold_attrs(attrs, folder), node: folder.fold_item_kind(node), span: folder.new_span(span), // FIXME: if this is replaced with a call to `folder.fold_tts` it causes // an ICE during resolve... odd! tokens, } } pub fn noop_fold_foreign_item<T: Folder>(ni: ForeignItem, folder: &mut T) -> SmallVector<ForeignItem> { SmallVector::one(folder.fold_foreign_item_simple(ni)) } pub fn noop_fold_foreign_item_simple<T: Folder>(ni: ForeignItem, folder: &mut T) -> ForeignItem { ForeignItem { id: folder.new_id(ni.id), vis: folder.fold_vis(ni.vis), ident: folder.fold_ident(ni.ident), attrs: fold_attrs(ni.attrs, folder), node: match ni.node { ForeignItemKind::Fn(fdec, generics) => { ForeignItemKind::Fn(folder.fold_fn_decl(fdec), folder.fold_generics(generics)) } ForeignItemKind::Static(t, m) => { ForeignItemKind::Static(folder.fold_ty(t), m) } ForeignItemKind::Ty => ForeignItemKind::Ty, ForeignItemKind::Macro(mac) => ForeignItemKind::Macro(folder.fold_mac(mac)), }, span: folder.new_span(ni.span) } } pub fn noop_fold_method_sig<T: Folder>(sig: MethodSig, folder: &mut T) -> MethodSig { MethodSig { abi: sig.abi, unsafety: sig.unsafety, constness: sig.constness, decl: folder.fold_fn_decl(sig.decl) } } pub fn noop_fold_pat<T: Folder>(p: P<Pat>, folder: &mut T) -> P<Pat> { p.map(|Pat {id, node, span}| Pat { id: folder.new_id(id), 
node: match node { PatKind::Wild => PatKind::Wild, PatKind::Ident(binding_mode, ident, sub) => { PatKind::Ident(binding_mode, folder.fold_ident(ident), sub.map(|x| folder.fold_pat(x))) } PatKind::Lit(e) => PatKind::Lit(folder.fold_expr(e)), PatKind::TupleStruct(pth, pats, ddpos) => { PatKind::TupleStruct(folder.fold_path(pth), pats.move_map(|x| folder.fold_pat(x)), ddpos) } PatKind::Path(opt_qself, pth) => { let opt_qself = opt_qself.map(|qself| { QSelf { ty: folder.fold_ty(qself.ty), position: qself.position } }); PatKind::Path(opt_qself, folder.fold_path(pth)) } PatKind::Struct(pth, fields, etc) => { let pth = folder.fold_path(pth); let fs = fields.move_map(|f| { Spanned { span: folder.new_span(f.span), node: ast::FieldPat { ident: folder.fold_ident(f.node.ident), pat: folder.fold_pat(f.node.pat), is_shorthand: f.node.is_shorthand, attrs: fold_attrs(f.node.attrs.into(), folder).into() }} }); PatKind::Struct(pth, fs, etc) } PatKind::Tuple(elts, ddpos) => { PatKind::Tuple(elts.move_map(|x| folder.fold_pat(x)), ddpos) } PatKind::Box(inner) => PatKind::Box(folder.fold_pat(inner)), PatKind::Ref(inner, mutbl) => PatKind::Ref(folder.fold_pat(inner), mutbl), PatKind::Range(e1, e2, end) => { PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2), folder.fold_range_end(end)) }, PatKind::Slice(before, slice, after) => { PatKind::Slice(before.move_map(|x| folder.fold_pat(x)), slice.map(|x| folder.fold_pat(x)), after.move_map(|x| folder.fold_pat(x))) } PatKind::Paren(inner) => PatKind::Paren(folder.fold_pat(inner)), PatKind::Mac(mac) => PatKind::Mac(folder.fold_mac(mac)) }, span: folder.new_span(span) }) } pub fn noop_fold_range_end<T: Folder>(end: RangeEnd, _folder: &mut T) -> RangeEnd { end } pub fn noop_fold_anon_const<T: Folder>(constant: AnonConst, folder: &mut T) -> AnonConst { let AnonConst {id, value} = constant; AnonConst { id: folder.new_id(id), value: folder.fold_expr(value), } } pub fn noop_fold_expr<T: Folder>(Expr {id, node, span, attrs}: Expr, folder: &mut 
T) -> Expr { Expr { node: match node { ExprKind::Box(e) => { ExprKind::Box(folder.fold_expr(e)) } ExprKind::Array(exprs) => { ExprKind::Array(folder.fold_exprs(exprs)) } ExprKind::Repeat(expr, count) => { ExprKind::Repeat(folder.fold_expr(expr), folder.fold_anon_const(count)) } ExprKind::Tup(exprs) => ExprKind::Tup(folder.fold_exprs(exprs)), ExprKind::Call(f, args) => { ExprKind::Call(folder.fold_expr(f), folder.fold_exprs(args)) } ExprKind::MethodCall(seg, args) => { ExprKind::MethodCall( PathSegment { ident: folder.fold_ident(seg.ident), parameters: seg.parameters.map(|ps| { ps.map(|ps| folder.fold_path_parameters(ps)) }), }, folder.fold_exprs(args)) } ExprKind::Binary(binop, lhs, rhs) => { ExprKind::Binary(binop, folder.fold_expr(lhs), folder.fold_expr(rhs)) } ExprKind::Unary(binop, ohs) => { ExprKind::Unary(binop, folder.fold_expr(ohs)) } ExprKind::Lit(l) => ExprKind::Lit(l), ExprKind::Cast(expr, ty) => { ExprKind::Cast(folder.fold_expr(expr), folder.fold_ty(ty)) } ExprKind::Type(expr, ty) => { ExprKind::Type(folder.fold_expr(expr), folder.fold_ty(ty)) } ExprKind::AddrOf(m, ohs) => ExprKind::AddrOf(m, folder.fold_expr(ohs)), ExprKind::If(cond, tr, fl) => { ExprKind::If(folder.fold_expr(cond), folder.fold_block(tr), fl.map(|x| folder.fold_expr(x))) } ExprKind::IfLet(pats, expr, tr, fl) => { ExprKind::IfLet(pats.move_map(|pat| folder.fold_pat(pat)), folder.fold_expr(expr), folder.fold_block(tr), fl.map(|x| folder.fold_expr(x))) } ExprKind::While(cond, body, opt_label) => { ExprKind::While(folder.fold_expr(cond), folder.fold_block(body), opt_label.map(|label| folder.fold_label(label))) } ExprKind::WhileLet(pats, expr, body, opt_label) => { ExprKind::WhileLet(pats.move_map(|pat| folder.fold_pat(pat)), folder.fold_expr(expr), folder.fold_block(body), opt_label.map(|label| folder.fold_label(label))) } ExprKind::ForLoop(pat, iter, body, opt_label) => { ExprKind::ForLoop(folder.fold_pat(pat), folder.fold_expr(iter), folder.fold_block(body), opt_label.map(|label| 
folder.fold_label(label))) } ExprKind::Loop(body, opt_label) => { ExprKind::Loop(folder.fold_block(body), opt_label.map(|label| folder.fold_label(label))) } ExprKind::Match(expr, arms) => { ExprKind::Match(folder.fold_expr(expr), arms.move_map(|x| folder.fold_arm(x))) } ExprKind::Closure(capture_clause, movability, decl, body, span) => { ExprKind::Closure(capture_clause, movability, folder.fold_fn_decl(decl), folder.fold_expr(body), folder.new_span(span)) } ExprKind::Block(blk, opt_label) => { ExprKind::Block(folder.fold_block(blk), opt_label.map(|label| folder.fold_label(label))) } ExprKind::Assign(el, er) => { ExprKind::Assign(folder.fold_expr(el), folder.fold_expr(er)) } ExprKind::AssignOp(op, el, er) => { ExprKind::AssignOp(op, folder.fold_expr(el), folder.fold_expr(er)) } ExprKind::Field(el, ident) => { ExprKind::Field(folder.fold_expr(el), folder.fold_ident(ident)) } ExprKind::Index(el, er) => { ExprKind::Index(folder.fold_expr(el), folder.fold_expr(er)) } ExprKind::Range(e1, e2, lim) => { ExprKind::Range(e1.map(|x| folder.fold_expr(x)), e2.map(|x| folder.fold_expr(x)), lim) } ExprKind::Path(qself, path) => { let qself = qself.map(|QSelf { ty, position }| { QSelf { ty: folder.fold_ty(ty), position, } }); ExprKind::Path(qself, folder.fold_path(path)) } ExprKind::Break(opt_label, opt_expr) => { ExprKind::Break(opt_label.map(|label| folder.fold_label(label)), opt_expr.map(|e| folder.fold_expr(e))) } ExprKind::Continue(opt_label) => { ExprKind::Continue(opt_label.map(|label| folder.fold_label(label))) } ExprKind::Ret(e) => ExprKind::Ret(e.map(|x| folder.fold_expr(x))), ExprKind::InlineAsm(asm) => ExprKind::InlineAsm(asm.map(|asm| { InlineAsm { inputs: asm.inputs.move_map(|(c, input)| { (c, folder.fold_expr(input)) }), outputs: asm.outputs.move_map(|out| { InlineAsmOutput { constraint: out.constraint, expr: folder.fold_expr(out.expr), is_rw: out.is_rw, is_indirect: out.is_indirect, } }), ..asm } })), ExprKind::Mac(mac) => ExprKind::Mac(folder.fold_mac(mac)), 
ExprKind::Struct(path, fields, maybe_expr) => { ExprKind::Struct(folder.fold_path(path), fields.move_map(|x| folder.fold_field(x)), maybe_expr.map(|x| folder.fold_expr(x))) }, ExprKind::Paren(ex) => { let sub_expr = folder.fold_expr(ex); return Expr { // Nodes that are equal modulo `Paren` sugar no-ops should have the same ids. id: sub_expr.id, node: ExprKind::Paren(sub_expr), span: folder.new_span(span), attrs: fold_attrs(attrs.into(), folder).into(), }; } ExprKind::Yield(ex) => ExprKind::Yield(ex.map(|x| folder.fold_expr(x))), ExprKind::Try(ex) => ExprKind::Try(folder.fold_expr(ex)), ExprKind::Catch(body) => ExprKind::Catch(folder.fold_block(body)), }, id: folder.new_id(id), span: folder.new_span(span), attrs: fold_attrs(attrs.into(), folder).into(), } } pub fn noop_fold_opt_expr<T: Folder>(e: P<Expr>, folder: &mut T) -> Option<P<Expr>> { Some(folder.fold_expr(e)) } pub fn noop_fold_exprs<T: Folder>(es: Vec<P<Expr>>, folder: &mut T) -> Vec<P<Expr>> { es.move_flat_map(|e| folder.fold_opt_expr(e)) } pub fn noop_fold_stmt<T: Folder>(Stmt {node, span, id}: Stmt, folder: &mut T) -> SmallVector<Stmt> { let id = folder.new_id(id); let span = folder.new_span(span); noop_fold_stmt_kind(node, folder).into_iter().map(|node| { Stmt { id: id, node: node, span: span } }).collect() } pub fn noop_fold_stmt_kind<T: Folder>(node: StmtKind, folder: &mut T) -> SmallVector<StmtKind> { match node { StmtKind::Local(local) => SmallVector::one(StmtKind::Local(folder.fold_local(local))), StmtKind::Item(item) => folder.fold_item(item).into_iter().map(StmtKind::Item).collect(), StmtKind::Expr(expr) => { folder.fold_opt_expr(expr).into_iter().map(StmtKind::Expr).collect() } StmtKind::Semi(expr) => { folder.fold_opt_expr(expr).into_iter().map(StmtKind::Semi).collect() } StmtKind::Mac(mac) => SmallVector::one(StmtKind::Mac(mac.map(|(mac, semi, attrs)| { (folder.fold_mac(mac), semi, fold_attrs(attrs.into(), folder).into()) }))), } } pub fn noop_fold_vis<T: Folder>(vis: Visibility, folder: &mut 
T) -> Visibility { match vis.node { VisibilityKind::Restricted { path, id } => { respan(vis.span, VisibilityKind::Restricted { path: path.map(|path| folder.fold_path(path)), id: folder.new_id(id), }) } _ => vis, } } #[cfg(test)] mod tests { use std::io; use ast::{self, Ident}; use util::parser_testing::{string_to_crate, matches_codepattern}; use print::pprust; use fold; use with_globals; use super::*; // this version doesn't care about getting comments or docstrings in. fn fake_print_crate(s: &mut pprust::State, krate: &ast::Crate) -> io::Result<()> { s.print_mod(&krate.module, &krate.attrs) } // change every identifier to "zz" struct ToZzIdentFolder; impl Folder for ToZzIdentFolder { fn fold_ident(&mut self, _: ast::Ident) -> ast::Ident { Ident::from_str("zz") } fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { fold::noop_fold_mac(mac, self) } } // maybe add to expand.rs... macro_rules! assert_pred { ($pred:expr, $predname:expr, $a:expr , $b:expr) => ( { let pred_val = $pred; let a_val = $a; let b_val = $b; if !(pred_val(&a_val, &b_val)) { panic!("expected args satisfying {}, got {} and {}", $predname, a_val, b_val); } } ) } // make sure idents get transformed everywhere #[test] fn ident_transformation () { with_globals(|| { let mut zz_fold = ToZzIdentFolder; let ast = string_to_crate( "#[a] mod b {fn c (d : e, f : g) {h!(i,j,k);l;m}}".to_string()); let folded_crate = zz_fold.fold_crate(ast); assert_pred!( matches_codepattern, "matches_codepattern", pprust::to_string(|s| fake_print_crate(s, &folded_crate)), "#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string()); }) } // even inside macro defs.... #[test] fn ident_transformation_in_defs () { with_globals(|| { let mut zz_fold = ToZzIdentFolder; let ast = string_to_crate( "macro_rules! 
a {(b $c:expr $(d $e:token)f+ => \ (g $(d $d $e)+))} ".to_string()); let folded_crate = zz_fold.fold_crate(ast); assert_pred!( matches_codepattern, "matches_codepattern", pprust::to_string(|s| fake_print_crate(s, &folded_crate)), "macro_rules! zz((zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+)));".to_string()); }) } }
37.496261
100
0.551734
8f1e184ed22e53410e0027a05ac48cc88cf3a161
250
#![allow(invalid_value)] fn main() { trait T { } #[derive(Debug)] struct S { x: * mut dyn T } dbg!(S { x: unsafe { std::mem::transmute((0usize, 0usize)) } }); //~ ERROR: encountered dangling or unaligned vtable pointer }
22.727273
128
0.568
d5261fde49786103928718da0d2c6fa6f76b5a58
113
quick_error! { #[derive(Debug)] pub enum ServerError { RxClosed {} NotStarted {} } }
14.125
26
0.513274
ebfd26d6f8196cea9ae0999dd10a4675b8df559a
4,893
#[doc = "Register `PROFILINGENABLE` reader"] pub struct R(crate::R<PROFILINGENABLE_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PROFILINGENABLE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PROFILINGENABLE_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PROFILINGENABLE_SPEC>) -> Self { R(reader) } } #[doc = "Register `PROFILINGENABLE` writer"] pub struct W(crate::W<PROFILINGENABLE_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PROFILINGENABLE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PROFILINGENABLE_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PROFILINGENABLE_SPEC>) -> Self { W(writer) } } #[doc = "Enable the profiling counters\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ENABLE_A { #[doc = "0: Disable profiling"] DISABLE = 0, #[doc = "1: Enable profiling"] ENABLE = 1, } impl From<ENABLE_A> for bool { #[inline(always)] fn from(variant: ENABLE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `ENABLE` reader - Enable the profiling counters"] pub struct ENABLE_R(crate::FieldReader<bool, ENABLE_A>); impl ENABLE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { ENABLE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ENABLE_A { match self.bits { false => ENABLE_A::DISABLE, true => ENABLE_A::ENABLE, } } #[doc = "Checks if the value of the field is `DISABLE`"] #[inline(always)] pub fn is_disable(&self) -> bool { **self == ENABLE_A::DISABLE } #[doc = "Checks if the value of the field is `ENABLE`"] #[inline(always)] pub fn is_enable(&self) -> bool { **self == ENABLE_A::ENABLE } } impl core::ops::Deref for ENABLE_R { type Target = crate::FieldReader<bool, ENABLE_A>; #[inline(always)] fn deref(&self) -> 
&Self::Target { &self.0 } } #[doc = "Field `ENABLE` writer - Enable the profiling counters"] pub struct ENABLE_W<'a> { w: &'a mut W, } impl<'a> ENABLE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ENABLE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Disable profiling"] #[inline(always)] pub fn disable(self) -> &'a mut W { self.variant(ENABLE_A::DISABLE) } #[doc = "Enable profiling"] #[inline(always)] pub fn enable(self) -> &'a mut W { self.variant(ENABLE_A::ENABLE) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl R { #[doc = "Bit 0 - Enable the profiling counters"] #[inline(always)] pub fn enable(&self) -> ENABLE_R { ENABLE_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Enable the profiling counters"] #[inline(always)] pub fn enable(&mut self) -> ENABLE_W { ENABLE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Enable the profiling counters.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [profilingenable](index.html) module"] pub struct PROFILINGENABLE_SPEC; impl crate::RegisterSpec for PROFILINGENABLE_SPEC { type Ux = u32; } #[doc = "`read()` method returns [profilingenable::R](R) reader structure"] impl crate::Readable for PROFILINGENABLE_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [profilingenable::W](W) writer structure"] impl crate::Writable for PROFILINGENABLE_SPEC { type Writer = W; } #[doc = "`reset()` method sets PROFILINGENABLE to value 0"] impl crate::Resettable for PROFILINGENABLE_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.391304
426
0.60045
187bd2350fd490e809a1ff08c4763aac7319ba76
10,272
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::INTENCLR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `RXPTRUPD`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RXPTRUPDR { #[doc = "Read: Disabled"] DISABLED, #[doc = "Read: Enabled"] ENABLED, } impl RXPTRUPDR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { RXPTRUPDR::DISABLED => false, RXPTRUPDR::ENABLED => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> RXPTRUPDR { match value { false => RXPTRUPDR::DISABLED, true => RXPTRUPDR::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline] pub fn is_disabled(&self) -> bool { *self == RXPTRUPDR::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline] pub fn is_enabled(&self) -> bool { *self == RXPTRUPDR::ENABLED } } #[doc = "Possible values of the field `STOPPED`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPEDR { #[doc = "Read: 
Disabled"] DISABLED, #[doc = "Read: Enabled"] ENABLED, } impl STOPPEDR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { STOPPEDR::DISABLED => false, STOPPEDR::ENABLED => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> STOPPEDR { match value { false => STOPPEDR::DISABLED, true => STOPPEDR::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline] pub fn is_disabled(&self) -> bool { *self == STOPPEDR::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline] pub fn is_enabled(&self) -> bool { *self == STOPPEDR::ENABLED } } #[doc = "Possible values of the field `TXPTRUPD`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TXPTRUPDR { #[doc = "Read: Disabled"] DISABLED, #[doc = "Read: Enabled"] ENABLED, } impl TXPTRUPDR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TXPTRUPDR::DISABLED => false, TXPTRUPDR::ENABLED => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TXPTRUPDR { match value { false => TXPTRUPDR::DISABLED, true => TXPTRUPDR::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline] pub fn is_disabled(&self) -> bool { *self == TXPTRUPDR::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline] pub fn is_enabled(&self) -> bool { *self == TXPTRUPDR::ENABLED } } #[doc = "Values that can be written to the field `RXPTRUPD`"] pub enum RXPTRUPDW { #[doc = "Disable"] 
CLEAR, } impl RXPTRUPDW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { RXPTRUPDW::CLEAR => true, } } } #[doc = r" Proxy"] pub struct _RXPTRUPDW<'a> { w: &'a mut W, } impl<'a> _RXPTRUPDW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: RXPTRUPDW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Disable"] #[inline] pub fn clear(self) -> &'a mut W { self.variant(RXPTRUPDW::CLEAR) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `STOPPED`"] pub enum STOPPEDW { #[doc = "Disable"] CLEAR, } impl STOPPEDW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { STOPPEDW::CLEAR => true, } } } #[doc = r" Proxy"] pub struct _STOPPEDW<'a> { w: &'a mut W, } impl<'a> _STOPPEDW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: STOPPEDW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Disable"] #[inline] pub fn clear(self) -> &'a mut W { self.variant(STOPPEDW::CLEAR) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TXPTRUPD`"] pub enum TXPTRUPDW { #[doc = "Disable"] 
CLEAR, } impl TXPTRUPDW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TXPTRUPDW::CLEAR => true, } } } #[doc = r" Proxy"] pub struct _TXPTRUPDW<'a> { w: &'a mut W, } impl<'a> _TXPTRUPDW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TXPTRUPDW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Disable"] #[inline] pub fn clear(self) -> &'a mut W { self.variant(TXPTRUPDW::CLEAR) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 1 - Write '1' to disable interrupt for RXPTRUPD event"] #[inline] pub fn rxptrupd(&self) -> RXPTRUPDR { RXPTRUPDR::_from({ const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 2 - Write '1' to disable interrupt for STOPPED event"] #[inline] pub fn stopped(&self) -> STOPPEDR { STOPPEDR::_from({ const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 5 - Write '1' to disable interrupt for TXPTRUPD event"] #[inline] pub fn txptrupd(&self) -> TXPTRUPDR { TXPTRUPDR::_from({ const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 1 - Write '1' to disable interrupt for RXPTRUPD 
event"] #[inline] pub fn rxptrupd(&mut self) -> _RXPTRUPDW { _RXPTRUPDW { w: self } } #[doc = "Bit 2 - Write '1' to disable interrupt for STOPPED event"] #[inline] pub fn stopped(&mut self) -> _STOPPEDW { _STOPPEDW { w: self } } #[doc = "Bit 5 - Write '1' to disable interrupt for TXPTRUPD event"] #[inline] pub fn txptrupd(&mut self) -> _TXPTRUPDW { _TXPTRUPDW { w: self } } }
25.809045
72
0.514895
de6f8a1d39063f3991431978f3cb185aa7b690cd
2,346
use futures::{ channel::mpsc::{UnboundedReceiver, UnboundedSender}, executor, try_join, }; use rumpsteak::{ channel::Bidirectional, session, try_session, End, Message, Receive, Role, Roles, Send, }; use std::{error::Error, result}; type Result<T> = result::Result<T, Box<dyn Error>>; type Channel = Bidirectional<UnboundedSender<Label>, UnboundedReceiver<Label>>; #[derive(Roles)] struct Roles(A, B, C); #[derive(Role)] #[message(Label)] struct A(#[route(B)] Channel, #[route(C)] Channel); #[derive(Role)] #[message(Label)] struct B(#[route(A)] Channel, #[route(C)] Channel); #[derive(Role)] #[message(Label)] struct C(#[route(A)] Channel, #[route(B)] Channel); #[derive(Message)] enum Label { Add(Add), Sum(Sum), } struct Add(i32); struct Sum(i32); #[session] type AdderA = Send<B, Add, Receive<B, Add, Send<C, Add, Receive<C, Sum, End>>>>; #[session] type AdderB = Receive<A, Add, Send<A, Add, Send<C, Add, Receive<C, Sum, End>>>>; #[session] type AdderC = Receive<A, Add, Receive<B, Add, Send<A, Sum, Send<B, Sum, End>>>>; async fn adder_a(role: &mut A) -> Result<()> { try_session(role, |s: AdderA<'_, _>| async { let x = 2; let s = s.send(Add(x)).await?; let (Add(y), s) = s.receive().await?; let s = s.send(Add(y)).await?; let (Sum(z), s) = s.receive().await?; println!("{} + {} = {}", x, y, z); assert_eq!(z, 5); Ok(((), s)) }) .await } async fn adder_b(role: &mut B) -> Result<()> { try_session(role, |s: AdderB<'_, _>| async { let (Add(y), s) = s.receive().await?; let x = 3; let s = s.send(Add(x)).await?; let s = s.send(Add(y)).await?; let (Sum(z), s) = s.receive().await?; println!("{} + {} = {}", x, y, z); assert_eq!(z, 5); Ok(((), s)) }) .await } async fn adder_c(role: &mut C) -> Result<()> { try_session(role, |s: AdderC<'_, _>| async { let (Add(x), s) = s.receive().await?; let (Add(y), s) = s.receive().await?; let z = x + y; let s = s.send(Sum(z)).await?; Ok(((), s.send(Sum(z)).await?)) }) .await } fn main() { let Roles(mut a, mut b, mut c) = Roles::default(); 
executor::block_on(async { try_join!(adder_a(&mut a), adder_b(&mut b), adder_c(&mut c)).unwrap(); }); }
25.5
91
0.546462
0a363fddd53124f6d9cc7218e88d552b94e1469b
6,018
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use dep_graph::DepNode; use hir::map as ast_map; use hir::def_id::{CRATE_DEF_INDEX}; use session::{config, Session}; use syntax::ast::NodeId; use syntax::attr; use syntax::entry::EntryPointType; use syntax_pos::Span; use hir::{Item, ItemFn}; use hir::intravisit::Visitor; struct EntryContext<'a, 'tcx: 'a> { session: &'a Session, map: &'a ast_map::Map<'tcx>, // The top-level function called 'main' main_fn: Option<(NodeId, Span)>, // The function that has attribute named 'main' attr_main_fn: Option<(NodeId, Span)>, // The function that has the attribute 'start' on it start_fn: Option<(NodeId, Span)>, // The functions that one might think are 'main' but aren't, e.g. // main functions not defined at the top level. For diagnostics. non_main_fns: Vec<(NodeId, Span)> , } impl<'a, 'tcx> Visitor<'tcx> for EntryContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx Item) { let def_id = self.map.local_def_id(item.id); let def_key = self.map.def_key(def_id); let at_root = def_key.parent == Some(CRATE_DEF_INDEX); find_item(item, self, at_root); } } pub fn find_entry_point(session: &Session, ast_map: &ast_map::Map) { let _task = ast_map.dep_graph.in_task(DepNode::EntryPoint); let any_exe = session.crate_types.borrow().iter().any(|ty| { *ty == config::CrateTypeExecutable }); if !any_exe { // No need to find a main function return } // If the user wants no main function at all, then stop here. 
if attr::contains_name(&ast_map.krate().attrs, "no_main") { session.entry_type.set(Some(config::EntryNone)); return } let mut ctxt = EntryContext { session: session, map: ast_map, main_fn: None, attr_main_fn: None, start_fn: None, non_main_fns: Vec::new(), }; ast_map.krate().visit_all_items(&mut ctxt); configure_main(&mut ctxt); } // Beware, this is duplicated in libsyntax/entry.rs, make sure to keep // them in sync. fn entry_point_type(item: &Item, at_root: bool) -> EntryPointType { match item.node { ItemFn(..) => { if attr::contains_name(&item.attrs, "start") { EntryPointType::Start } else if attr::contains_name(&item.attrs, "main") { EntryPointType::MainAttr } else if item.name.as_str() == "main" { if at_root { // This is a top-level function so can be 'main' EntryPointType::MainNamed } else { EntryPointType::OtherMain } } else { EntryPointType::None } } _ => EntryPointType::None, } } fn find_item(item: &Item, ctxt: &mut EntryContext, at_root: bool) { match entry_point_type(item, at_root) { EntryPointType::MainNamed => { if ctxt.main_fn.is_none() { ctxt.main_fn = Some((item.id, item.span)); } else { span_err!(ctxt.session, item.span, E0136, "multiple 'main' functions"); } }, EntryPointType::OtherMain => { ctxt.non_main_fns.push((item.id, item.span)); }, EntryPointType::MainAttr => { if ctxt.attr_main_fn.is_none() { ctxt.attr_main_fn = Some((item.id, item.span)); } else { struct_span_err!(ctxt.session, item.span, E0137, "multiple functions with a #[main] attribute") .span_label(item.span, &format!("additional #[main] function")) .span_label(ctxt.attr_main_fn.unwrap().1, &format!("first #[main] function")) .emit(); } }, EntryPointType::Start => { if ctxt.start_fn.is_none() { ctxt.start_fn = Some((item.id, item.span)); } else { span_err!(ctxt.session, item.span, E0138, "multiple 'start' functions"); } }, EntryPointType::None => () } } fn configure_main(this: &mut EntryContext) { if this.start_fn.is_some() { *this.session.entry_fn.borrow_mut() = this.start_fn; 
this.session.entry_type.set(Some(config::EntryStart)); } else if this.attr_main_fn.is_some() { *this.session.entry_fn.borrow_mut() = this.attr_main_fn; this.session.entry_type.set(Some(config::EntryMain)); } else if this.main_fn.is_some() { *this.session.entry_fn.borrow_mut() = this.main_fn; this.session.entry_type.set(Some(config::EntryMain)); } else { // No main function let mut err = this.session.struct_err("main function not found"); if !this.non_main_fns.is_empty() { // There were some functions named 'main' though. Try to give the user a hint. err.note("the main function must be defined at the crate level \ but you have one or more functions named 'main' that are not \ defined at the crate level. Either move the definition or \ attach the `#[main]` attribute to override this behavior."); for &(_, span) in &this.non_main_fns { err.span_note(span, "here is a function named 'main'"); } err.emit(); this.session.abort_if_errors(); } else { err.emit(); } } }
34.988372
93
0.581256
90c518da489d80b3ad34a8286e0d2feb9a4eb3b7
18,561
//! CreateTopicsRequest //! //! See the schema for this message [here](https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/CreateTopicsRequest.json). // WARNING: the items of this module are generated and should not be edited directly #![allow(unused)] use std::borrow::Borrow; use std::collections::BTreeMap; use bytes::Bytes; use log::error; use uuid::Uuid; use crate::protocol::{ Encodable, Decodable, MapEncodable, MapDecodable, Encoder, Decoder, EncodeError, DecodeError, Message, HeaderVersion, VersionRange, types, write_unknown_tagged_fields, compute_unknown_tagged_fields_size, StrBytes, buf::{ByteBuf, ByteBufMut} }; /// Valid versions: 0-7 #[derive(Debug, Clone, PartialEq)] pub struct CreatableReplicaAssignment { /// The brokers to place the partition on. /// /// Supported API versions: 0-7 pub broker_ids: Vec<super::BrokerId>, /// Other tagged fields pub unknown_tagged_fields: BTreeMap<i32, Vec<u8>>, } impl MapEncodable for CreatableReplicaAssignment { type Key = i32; fn encode<B: ByteBufMut>(&self, key: &Self::Key, buf: &mut B, version: i16) -> Result<(), EncodeError> { types::Int32.encode(buf, key)?; if version >= 5 { types::CompactArray(types::Int32).encode(buf, &self.broker_ids)?; } else { types::Array(types::Int32).encode(buf, &self.broker_ids)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } types::UnsignedVarInt.encode(buf, num_tagged_fields as u32)?; write_unknown_tagged_fields(buf, 0.., &self.unknown_tagged_fields)?; } Ok(()) } fn compute_size(&self, key: &Self::Key, version: i16) -> Result<usize, EncodeError> { let mut total_size = 0; total_size += types::Int32.compute_size(key)?; if version >= 5 { total_size += types::CompactArray(types::Int32).compute_size(&self.broker_ids)?; } else { total_size += 
types::Array(types::Int32).compute_size(&self.broker_ids)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } total_size += types::UnsignedVarInt.compute_size(num_tagged_fields as u32)?; total_size += compute_unknown_tagged_fields_size(&self.unknown_tagged_fields)?; } Ok(total_size) } } impl MapDecodable for CreatableReplicaAssignment { type Key = i32; fn decode<B: ByteBuf>(buf: &mut B, version: i16) -> Result<(Self::Key, Self), DecodeError> { let key_field = types::Int32.decode(buf)?; let broker_ids = if version >= 5 { types::CompactArray(types::Int32).decode(buf)? } else { types::Array(types::Int32).decode(buf)? }; let mut unknown_tagged_fields = BTreeMap::new(); if version >= 5 { let num_tagged_fields = types::UnsignedVarInt.decode(buf)?; for _ in 0..num_tagged_fields { let tag: u32 = types::UnsignedVarInt.decode(buf)?; let size: u32 = types::UnsignedVarInt.decode(buf)?; let mut unknown_value = vec![0; size as usize]; buf.try_copy_to_slice(&mut unknown_value)?; unknown_tagged_fields.insert(tag as i32, unknown_value); } } Ok((key_field, Self { broker_ids, unknown_tagged_fields, })) } } impl Default for CreatableReplicaAssignment { fn default() -> Self { Self { broker_ids: Default::default(), unknown_tagged_fields: BTreeMap::new(), } } } impl Message for CreatableReplicaAssignment { const VERSIONS: VersionRange = VersionRange { min: 0, max: 7 }; } /// Valid versions: 0-7 #[derive(Debug, Clone, PartialEq)] pub struct CreateableTopicConfig { /// The configuration value. 
/// /// Supported API versions: 0-7 pub value: Option<StrBytes>, /// Other tagged fields pub unknown_tagged_fields: BTreeMap<i32, Vec<u8>>, } impl MapEncodable for CreateableTopicConfig { type Key = StrBytes; fn encode<B: ByteBufMut>(&self, key: &Self::Key, buf: &mut B, version: i16) -> Result<(), EncodeError> { if version >= 5 { types::CompactString.encode(buf, key)?; } else { types::String.encode(buf, key)?; } if version >= 5 { types::CompactString.encode(buf, &self.value)?; } else { types::String.encode(buf, &self.value)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } types::UnsignedVarInt.encode(buf, num_tagged_fields as u32)?; write_unknown_tagged_fields(buf, 0.., &self.unknown_tagged_fields)?; } Ok(()) } fn compute_size(&self, key: &Self::Key, version: i16) -> Result<usize, EncodeError> { let mut total_size = 0; if version >= 5 { total_size += types::CompactString.compute_size(key)?; } else { total_size += types::String.compute_size(key)?; } if version >= 5 { total_size += types::CompactString.compute_size(&self.value)?; } else { total_size += types::String.compute_size(&self.value)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } total_size += types::UnsignedVarInt.compute_size(num_tagged_fields as u32)?; total_size += compute_unknown_tagged_fields_size(&self.unknown_tagged_fields)?; } Ok(total_size) } } impl MapDecodable for CreateableTopicConfig { type Key = StrBytes; fn decode<B: ByteBuf>(buf: &mut B, version: i16) -> Result<(Self::Key, Self), DecodeError> { let key_field = if version >= 5 { types::CompactString.decode(buf)? } else { types::String.decode(buf)? 
}; let value = if version >= 5 { types::CompactString.decode(buf)? } else { types::String.decode(buf)? }; let mut unknown_tagged_fields = BTreeMap::new(); if version >= 5 { let num_tagged_fields = types::UnsignedVarInt.decode(buf)?; for _ in 0..num_tagged_fields { let tag: u32 = types::UnsignedVarInt.decode(buf)?; let size: u32 = types::UnsignedVarInt.decode(buf)?; let mut unknown_value = vec![0; size as usize]; buf.try_copy_to_slice(&mut unknown_value)?; unknown_tagged_fields.insert(tag as i32, unknown_value); } } Ok((key_field, Self { value, unknown_tagged_fields, })) } } impl Default for CreateableTopicConfig { fn default() -> Self { Self { value: Some(Default::default()), unknown_tagged_fields: BTreeMap::new(), } } } impl Message for CreateableTopicConfig { const VERSIONS: VersionRange = VersionRange { min: 0, max: 7 }; } /// Valid versions: 0-7 #[derive(Debug, Clone, PartialEq)] pub struct CreatableTopic { /// The number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions. /// /// Supported API versions: 0-7 pub num_partitions: i32, /// The number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor. /// /// Supported API versions: 0-7 pub replication_factor: i16, /// The manual partition assignment, or the empty array if we are using automatic assignment. /// /// Supported API versions: 0-7 pub assignments: indexmap::IndexMap<i32, CreatableReplicaAssignment>, /// The custom topic configurations to set. 
/// /// Supported API versions: 0-7 pub configs: indexmap::IndexMap<StrBytes, CreateableTopicConfig>, /// Other tagged fields pub unknown_tagged_fields: BTreeMap<i32, Vec<u8>>, } impl MapEncodable for CreatableTopic { type Key = super::TopicName; fn encode<B: ByteBufMut>(&self, key: &Self::Key, buf: &mut B, version: i16) -> Result<(), EncodeError> { if version >= 5 { types::CompactString.encode(buf, key)?; } else { types::String.encode(buf, key)?; } types::Int32.encode(buf, &self.num_partitions)?; types::Int16.encode(buf, &self.replication_factor)?; if version >= 5 { types::CompactArray(types::Struct { version }).encode(buf, &self.assignments)?; } else { types::Array(types::Struct { version }).encode(buf, &self.assignments)?; } if version >= 5 { types::CompactArray(types::Struct { version }).encode(buf, &self.configs)?; } else { types::Array(types::Struct { version }).encode(buf, &self.configs)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } types::UnsignedVarInt.encode(buf, num_tagged_fields as u32)?; write_unknown_tagged_fields(buf, 0.., &self.unknown_tagged_fields)?; } Ok(()) } fn compute_size(&self, key: &Self::Key, version: i16) -> Result<usize, EncodeError> { let mut total_size = 0; if version >= 5 { total_size += types::CompactString.compute_size(key)?; } else { total_size += types::String.compute_size(key)?; } total_size += types::Int32.compute_size(&self.num_partitions)?; total_size += types::Int16.compute_size(&self.replication_factor)?; if version >= 5 { total_size += types::CompactArray(types::Struct { version }).compute_size(&self.assignments)?; } else { total_size += types::Array(types::Struct { version }).compute_size(&self.assignments)?; } if version >= 5 { total_size += types::CompactArray(types::Struct { version }).compute_size(&self.configs)?; } else { total_size += 
types::Array(types::Struct { version }).compute_size(&self.configs)?; } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } total_size += types::UnsignedVarInt.compute_size(num_tagged_fields as u32)?; total_size += compute_unknown_tagged_fields_size(&self.unknown_tagged_fields)?; } Ok(total_size) } } impl MapDecodable for CreatableTopic { type Key = super::TopicName; fn decode<B: ByteBuf>(buf: &mut B, version: i16) -> Result<(Self::Key, Self), DecodeError> { let key_field = if version >= 5 { types::CompactString.decode(buf)? } else { types::String.decode(buf)? }; let num_partitions = types::Int32.decode(buf)?; let replication_factor = types::Int16.decode(buf)?; let assignments = if version >= 5 { types::CompactArray(types::Struct { version }).decode(buf)? } else { types::Array(types::Struct { version }).decode(buf)? }; let configs = if version >= 5 { types::CompactArray(types::Struct { version }).decode(buf)? } else { types::Array(types::Struct { version }).decode(buf)? 
}; let mut unknown_tagged_fields = BTreeMap::new(); if version >= 5 { let num_tagged_fields = types::UnsignedVarInt.decode(buf)?; for _ in 0..num_tagged_fields { let tag: u32 = types::UnsignedVarInt.decode(buf)?; let size: u32 = types::UnsignedVarInt.decode(buf)?; let mut unknown_value = vec![0; size as usize]; buf.try_copy_to_slice(&mut unknown_value)?; unknown_tagged_fields.insert(tag as i32, unknown_value); } } Ok((key_field, Self { num_partitions, replication_factor, assignments, configs, unknown_tagged_fields, })) } } impl Default for CreatableTopic { fn default() -> Self { Self { num_partitions: 0, replication_factor: 0, assignments: Default::default(), configs: Default::default(), unknown_tagged_fields: BTreeMap::new(), } } } impl Message for CreatableTopic { const VERSIONS: VersionRange = VersionRange { min: 0, max: 7 }; } /// Valid versions: 0-7 #[derive(Debug, Clone, PartialEq)] pub struct CreateTopicsRequest { /// The topics to create. /// /// Supported API versions: 0-7 pub topics: indexmap::IndexMap<super::TopicName, CreatableTopic>, /// How long to wait in milliseconds before timing out the request. /// /// Supported API versions: 0-7 pub timeout_ms: i32, /// If true, check that the topics can be created as specified, but don't create anything. 
/// /// Supported API versions: 1-7 pub validate_only: bool, /// Other tagged fields pub unknown_tagged_fields: BTreeMap<i32, Vec<u8>>, } impl Encodable for CreateTopicsRequest { fn encode<B: ByteBufMut>(&self, buf: &mut B, version: i16) -> Result<(), EncodeError> { if version >= 5 { types::CompactArray(types::Struct { version }).encode(buf, &self.topics)?; } else { types::Array(types::Struct { version }).encode(buf, &self.topics)?; } types::Int32.encode(buf, &self.timeout_ms)?; if version >= 1 { types::Boolean.encode(buf, &self.validate_only)?; } else { if self.validate_only { return Err(EncodeError) } } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } types::UnsignedVarInt.encode(buf, num_tagged_fields as u32)?; write_unknown_tagged_fields(buf, 0.., &self.unknown_tagged_fields)?; } Ok(()) } fn compute_size(&self, version: i16) -> Result<usize, EncodeError> { let mut total_size = 0; if version >= 5 { total_size += types::CompactArray(types::Struct { version }).compute_size(&self.topics)?; } else { total_size += types::Array(types::Struct { version }).compute_size(&self.topics)?; } total_size += types::Int32.compute_size(&self.timeout_ms)?; if version >= 1 { total_size += types::Boolean.compute_size(&self.validate_only)?; } else { if self.validate_only { return Err(EncodeError) } } if version >= 5 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } total_size += types::UnsignedVarInt.compute_size(num_tagged_fields as u32)?; total_size += compute_unknown_tagged_fields_size(&self.unknown_tagged_fields)?; } Ok(total_size) } } impl Decodable for CreateTopicsRequest { fn decode<B: ByteBuf>(buf: &mut B, version: i16) -> Result<Self, 
DecodeError> { let topics = if version >= 5 { types::CompactArray(types::Struct { version }).decode(buf)? } else { types::Array(types::Struct { version }).decode(buf)? }; let timeout_ms = types::Int32.decode(buf)?; let validate_only = if version >= 1 { types::Boolean.decode(buf)? } else { false }; let mut unknown_tagged_fields = BTreeMap::new(); if version >= 5 { let num_tagged_fields = types::UnsignedVarInt.decode(buf)?; for _ in 0..num_tagged_fields { let tag: u32 = types::UnsignedVarInt.decode(buf)?; let size: u32 = types::UnsignedVarInt.decode(buf)?; let mut unknown_value = vec![0; size as usize]; buf.try_copy_to_slice(&mut unknown_value)?; unknown_tagged_fields.insert(tag as i32, unknown_value); } } Ok(Self { topics, timeout_ms, validate_only, unknown_tagged_fields, }) } } impl Default for CreateTopicsRequest { fn default() -> Self { Self { topics: Default::default(), timeout_ms: 60000, validate_only: false, unknown_tagged_fields: BTreeMap::new(), } } } impl Message for CreateTopicsRequest { const VERSIONS: VersionRange = VersionRange { min: 0, max: 7 }; } impl HeaderVersion for CreateTopicsRequest { fn header_version(version: i16) -> i16 { if version >= 5 { 2 } else { 1 } } }
36.609467
178
0.58596
08422bb6aaf899664650f08110cf033d05630873
10,008
use crate::block::{Block, ItemContent}; use crate::block_store::{BlockStore, SquashResult, StateVector}; use crate::doc::Options; use crate::event::{EventHandler, UpdateEvent}; use crate::id_set::DeleteSet; use crate::types::{Branch, BranchPtr, Path, PathSegment, TypePtr, TypeRefs}; use crate::update::PendingUpdate; use crate::updates::encoder::{Encode, Encoder}; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::ops::Deref; use std::rc::Rc; /// Store is a core element of a document. It contains all of the information, like block store /// map of root types, pending updates waiting to be applied once a missing update information /// arrives and all subscribed callbacks. pub(crate) struct Store { pub options: Options, /// Root types (a.k.a. top-level types). These types are defined by users at the document level, /// they have their own unique names and represent core shared types that expose operations /// which can be called concurrently by remote peers in a conflict-free manner. pub types: HashMap<Rc<str>, Box<Branch>>, /// A block store of a current document. It represent all blocks (inserted or tombstoned /// operations) integrated - and therefore visible - into a current document. pub(crate) blocks: BlockStore, /// A pending update. It contains blocks, which are not yet integrated into `blocks`, usually /// because due to issues in update exchange, there were some missing blocks that need to be /// integrated first before the data from `pending` can be applied safely. pub pending: Option<PendingUpdate>, /// A pending delete set. Just like `pending`, it contains deleted ranges of blocks that have /// not been yet applied due to missing blocks that prevent `pending` update to be integrated /// into `blocks`. pub pending_ds: Option<DeleteSet>, /// A subscription handler. It contains all callbacks with registered by user functions that /// are supposed to be called, once a new update arrives. 
pub(crate) update_events: EventHandler<UpdateEvent>, } impl Store { /// Create a new empty store in context of a given `client_id`. pub fn new(options: Options) -> Self { Store { options, types: Default::default(), blocks: BlockStore::new(), pending: None, pending_ds: None, update_events: EventHandler::new(), } } /// Get the latest clock sequence number observed and integrated into a current store client. /// This is exclusive value meaning it describes a clock value of the beginning of the next /// block that's about to be inserted. You cannot use that clock value to find any existing /// block content. pub fn get_local_state(&self) -> u32 { self.blocks.get_state(&self.options.client_id) } /// Returns a branch reference to a complex type identified by its pointer. Returns `None` if /// no such type could be found or was ever defined. pub fn get_type<K: Into<Rc<str>>>(&self, key: K) -> Option<BranchPtr> { let ptr = self.types.get(&key.into())?; Some(BranchPtr::from(ptr)) } /// Returns a branch reference to a complex type identified by its pointer. Returns `None` if /// no such type could be found or was ever defined. pub fn get_or_create_type<K: Into<Rc<str>>>( &mut self, key: K, node_name: Option<String>, type_ref: TypeRefs, ) -> BranchPtr { let key = key.into(); match self.types.entry(key.clone()) { Entry::Occupied(mut e) => BranchPtr::from(e.get_mut()), Entry::Vacant(e) => { let mut branch = Branch::new(type_ref, node_name); let branch_ref = BranchPtr::from(&mut branch); e.insert(branch); branch_ref } } } pub(crate) fn get_type_key(&self, ptr: BranchPtr) -> Option<&Rc<str>> { let branch = ptr.deref() as *const Branch; for (k, v) in self.types.iter() { let target = v.as_ref() as *const Branch; if std::ptr::eq(target, branch) { return Some(k); } } None } /// Compute a diff to sync with another client. /// /// This is the most efficient method to sync with another client by only /// syncing the differences. 
/// /// The sync protocol in Yrs/js is: /// * Send StateVector to the other client. /// * The other client comutes a minimal diff to sync by using the StateVector. pub fn encode_diff<E: Encoder>(&self, remote_sv: &StateVector, encoder: &mut E) { //TODO: this could be actually 2 steps: // 1. create Diff of block store and remote state vector (it can have lifetime of bock store) // 2. make Diff implement Encode trait and encode it // this way we can add some extra utility method on top of Diff (like introspection) without need of decoding it. self.write_blocks(remote_sv, encoder); let delete_set = DeleteSet::from(&self.blocks); delete_set.encode(encoder); } pub(crate) fn write_blocks<E: Encoder>(&self, remote_sv: &StateVector, encoder: &mut E) { let local_sv = self.blocks.get_state_vector(); let mut diff = Self::diff_state_vectors(&local_sv, remote_sv); // Write items with higher client ids first // This heavily improves the conflict algorithm. diff.sort_by(|a, b| b.0.cmp(&a.0)); encoder.write_uvar(diff.len()); for (client, clock) in diff { let blocks = self.blocks.get(&client).unwrap(); let clock = clock.max(blocks.first().id().clock); // make sure the first id exists let start = blocks.find_pivot(clock).unwrap(); // write # encoded structs encoder.write_uvar(blocks.integrated_len() - start); encoder.write_client(client); encoder.write_uvar(clock); let first_block = blocks.get(start); // write first struct with an offset first_block.encode(Some(self), encoder); for i in (start + 1)..blocks.integrated_len() { blocks.get(i).encode(Some(self), encoder); } } } fn diff_state_vectors(local_sv: &StateVector, remote_sv: &StateVector) -> Vec<(u64, u32)> { let mut diff = Vec::new(); for (client, &remote_clock) in remote_sv.iter() { let local_clock = local_sv.get(client); if local_clock > remote_clock { diff.push((*client, remote_clock)); } } for (client, _) in local_sv.iter() { if remote_sv.get(client) == 0 { diff.push((*client, 0)); } } diff } pub(crate) fn gc_cleanup(&self, 
mut compaction: SquashResult) { if let Some(parent_sub) = compaction.parent_sub { if let TypePtr::Branch(mut inner) = compaction.parent { match inner.map.entry(parent_sub.clone()) { Entry::Occupied(mut e) => { let cell = e.get_mut(); if cell.id() == &compaction.old_right { *cell = compaction.replacement; } } Entry::Vacant(e) => { e.insert(compaction.replacement); } } } } if let Some(Block::Item(right)) = compaction.new_right.as_deref_mut() { right.left = Some(compaction.replacement); } } pub fn get_type_from_path(&self, path: &Path) -> Option<BranchPtr> { let mut i = path.iter(); if let Some(PathSegment::Key(root_name)) = i.next() { let mut current = self.get_type(root_name.clone())?; while let Some(segment) = i.next() { match segment { PathSegment::Key(key) => { let child = current.map.get(key)?.as_item()?; if let ItemContent::Type(child_branch) = &child.content { current = child_branch.into(); } else { return None; } } PathSegment::Index(index) => { if let Some((ItemContent::Type(child_branch), _)) = current.get_at(*index) { current = child_branch.into(); } else { return None; } } } } Some(current) } else { None } } } impl Encode for Store { /// Encodes the document state to a binary format. /// /// Document updates are idempotent and commutative. Caveats: /// * It doesn't matter in which order document updates are applied. /// * As long as all clients receive the same document updates, all clients /// end up with the same content. /// * Even if an update contains known information, the unknown information /// is extracted and integrated into the document structure. 
fn encode<E: Encoder>(&self, encoder: &mut E) { self.encode_diff(&StateVector::default(), encoder) } } impl std::fmt::Display for Store { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Store(ID: {}) {{", self.options.client_id)?; if !self.types.is_empty() { writeln!(f, "\ttypes: {{")?; for (k, v) in self.types.iter() { writeln!(f, "\t\t'{}': {}", k.as_ref(), v)?; } writeln!(f, "\t}}")?; } if !self.blocks.is_empty() { writeln!(f, "\tblocks: {}", self.blocks)?; } writeln!(f, "}}") } }
40.682927
121
0.576439
c1c276a4987de91d04329a4f520854bfd79ec17d
2,507
use core::sync::atomic::{AtomicUsize, Ordering}; use crate::{Controlled, Filter, Seek, Signal}; const PLAY: usize = 0; const PAUSE: usize = 1; const STOP: usize = 2; /// A source that can be paused or permanently stopped pub struct Stop<T: ?Sized> { state: AtomicUsize, inner: T, } impl<T> Stop<T> { pub(crate) fn new(signal: T) -> Self { Self { state: AtomicUsize::new(PLAY), inner: signal, } } } impl<T: ?Sized> Stop<T> { /// Stop the source for good pub(crate) fn stop(&self) { self.state.store(STOP, Ordering::Relaxed); } pub(crate) fn is_paused(&self) -> bool { self.state.load(Ordering::Relaxed) == PAUSE } pub(crate) fn is_stopped(&self) -> bool { self.state.load(Ordering::Relaxed) == STOP } } impl<T: Signal + ?Sized> Signal for Stop<T> { type Frame = T::Frame; fn sample(&self, interval: f32, out: &mut [T::Frame]) { self.inner.sample(interval, out); } fn remaining(&self) -> f32 { let state = self.state.load(Ordering::Relaxed); match state { PLAY => self.inner.remaining(), PAUSE => f32::INFINITY, _ => 0.0, } } #[inline] fn handle_dropped(&self) { self.inner.handle_dropped(); } } impl<T: ?Sized> Filter for Stop<T> { type Inner = T; fn inner(&self) -> &T { &self.inner } } impl<T: ?Sized + Seek> Seek for Stop<T> { fn seek(&self, seconds: f32) { self.inner.seek(seconds); } } /// Thread-safe control for a [`Stop`] filter #[derive(Copy, Clone)] pub struct StopControl<'a>(&'a AtomicUsize); unsafe impl<'a, T: 'a> Controlled<'a> for Stop<T> { type Control = StopControl<'a>; unsafe fn make_control(signal: &'a Stop<T>) -> Self::Control { StopControl(&signal.state) } } impl<'a> StopControl<'a> { /// Suspend playback of the source pub fn pause(&self) { self.0.store(PAUSE, Ordering::Relaxed); } /// Resume the paused source pub fn resume(&self) { self.0.store(PLAY, Ordering::Relaxed); } /// Stop the source for good pub fn stop(&self) { self.0.store(STOP, Ordering::Relaxed); } /// Whether the source is paused pub fn is_paused(&self) -> bool { self.0.load(Ordering::Relaxed) == PAUSE 
} /// Whether the source has stopped pub fn is_stopped(&self) -> bool { self.0.load(Ordering::Relaxed) == STOP } }
22.383929
66
0.567611
28ce0fa869e95919f39197e32859001413fce829
1,576
use super::*; pub const fn vec2(x: f32, y: f32) -> Vec2 { Vec2 { x, y } } #[derive(Debug, Clone, Copy, Default, PartialEq, PartialOrd)] #[repr(C)] pub struct Vec2 { pub x: f32, pub y: f32, } unsafe impl Pod for Vec2 {} impl Vec2 { #[must_use] pub fn dot(self, other: Self) -> f32 { (self.x * other.x) + (self.y * other.y) } #[must_use] pub fn length_squared(self) -> f32 { self.dot(self) } #[must_use] pub fn length(self) -> f32 { self.length_squared().sqrt() } #[must_use] pub fn normalized(self) -> Self { self / self.length() } /// Applies a circular deadzone effect. /// /// * If the input vector's length is greater than the deadzone range the /// output will be the input value re-scaled appropriately. /// * Otherwise the output is zero. #[must_use] pub fn apply_circular_deadzone(self, deadzone: f32) -> Self { let start_len = self.length(); if start_len > deadzone { let new_len = (start_len - deadzone) / (1.0 - deadzone); self * (new_len / start_len) } else { Self::default() } } #[must_use] pub fn apply_circular_clamp(self) -> Self { if self.length_squared() > 1.0 { self.normalized() } else { self } } } impl Mul<f32> for Vec2 { type Output = Self; #[must_use] fn mul(self, scale: f32) -> Self::Output { Self { x: self.x * scale, y: self.y * scale } } } impl Div<f32> for Vec2 { type Output = Self; #[must_use] fn div(self, scale: f32) -> Self::Output { Self { x: self.x / scale, y: self.y / scale } } }
21.589041
75
0.590102
db6d9b6a427f79cb65b8db4c791f14d7679472b6
28,135
// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use super::_match::{MatchCheckCtxt, Matrix, expand_pattern, is_useful}; use super::_match::Usefulness::*; use super::_match::WitnessPreference::*; use super::{Pattern, PatternContext, PatternError, PatternKind}; use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor}; use rustc::middle::expr_use_visitor::{LoanCause, MutateMode}; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization::cmt_; use rustc::middle::region; use rustc::session::Session; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::Substs; use rustc::lint; use rustc_errors::{Applicability, DiagnosticBuilder}; use rustc::util::common::ErrorReported; use rustc::hir::def::*; use rustc::hir::def_id::DefId; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::{self, Pat, PatKind}; use std::slice; use syntax::ast; use syntax::ptr::P; use syntax_pos::{Span, DUMMY_SP, MultiSpan}; struct OuterVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> } impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::OnlyBodies(&self.tcx.hir) } fn visit_body(&mut self, body: &'tcx hir::Body) { intravisit::walk_body(self, body); let def_id = self.tcx.hir.body_owner_def_id(body.id()); let _ = self.tcx.check_match(def_id); } } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { tcx.hir.krate().visit_all_item_likes(&mut OuterVisitor { tcx }.as_deep_visitor()); 
tcx.sess.abort_if_errors(); } pub(crate) fn check_match<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, ) -> Result<(), ErrorReported> { let body_id = if let Some(id) = tcx.hir.as_local_node_id(def_id) { tcx.hir.body_owned_by(id) } else { return Ok(()); }; tcx.sess.track_errors(|| { MatchVisitor { tcx, tables: tcx.body_tables(body_id), region_scope_tree: &tcx.region_scope_tree(def_id), param_env: tcx.param_env(def_id), identity_substs: Substs::identity_for_item(tcx, def_id), }.visit_body(tcx.hir.body(body_id)); }) } fn create_e0004<'a>(sess: &'a Session, sp: Span, error_message: String) -> DiagnosticBuilder<'a> { struct_span_err!(sess, sp, E0004, "{}", &error_message) } struct MatchVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &'a ty::TypeckTables<'tcx>, param_env: ty::ParamEnv<'tcx>, identity_substs: &'tcx Substs<'tcx>, region_scope_tree: &'a region::ScopeTree, } impl<'a, 'tcx> Visitor<'tcx> for MatchVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::None } fn visit_expr(&mut self, ex: &'tcx hir::Expr) { intravisit::walk_expr(self, ex); match ex.node { hir::ExprKind::Match(ref scrut, ref arms, source) => { self.check_match(scrut, arms, source); } _ => {} } } fn visit_local(&mut self, loc: &'tcx hir::Local) { intravisit::walk_local(self, loc); self.check_irrefutable(&loc.pat, match loc.source { hir::LocalSource::Normal => "local binding", hir::LocalSource::ForLoopDesugar => "`for` loop binding", }); // Check legality of move bindings and `@` patterns. 
self.check_patterns(false, slice::from_ref(&loc.pat)); } fn visit_body(&mut self, body: &'tcx hir::Body) { intravisit::walk_body(self, body); for arg in &body.arguments { self.check_irrefutable(&arg.pat, "function argument"); self.check_patterns(false, slice::from_ref(&arg.pat)); } } } impl<'a, 'tcx> PatternContext<'a, 'tcx> { fn report_inlining_errors(&self, pat_span: Span) { for error in &self.errors { match *error { PatternError::StaticInPattern(span) => { self.span_e0158(span, "statics cannot be referenced in patterns") } PatternError::AssociatedConstInPattern(span) => { self.span_e0158(span, "associated consts cannot be referenced in patterns") } PatternError::FloatBug => { // FIXME(#31407) this is only necessary because float parsing is buggy ::rustc::mir::interpret::struct_error( self.tcx.at(pat_span), "could not evaluate float literal (see issue #31407)", ).emit(); } PatternError::NonConstPath(span) => { ::rustc::mir::interpret::struct_error( self.tcx.at(span), "runtime values cannot be referenced in patterns", ).emit(); } } } } fn span_e0158(&self, span: Span, text: &str) { span_err!(self.tcx.sess, span, E0158, "{}", text) } } impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { fn check_patterns(&self, has_guard: bool, pats: &[P<Pat>]) { check_legality_of_move_bindings(self, has_guard, pats); for pat in pats { check_legality_of_bindings_in_at_patterns(self, pat); } } fn check_match( &self, scrut: &hir::Expr, arms: &'tcx [hir::Arm], source: hir::MatchSource) { for arm in arms { // First, check legality of move bindings. self.check_patterns(arm.guard.is_some(), &arm.pats); // Second, if there is a guard on each arm, make sure it isn't // assigning or borrowing anything mutably. if let Some(ref guard) = arm.guard { if self.tcx.check_for_mutation_in_guard_via_ast_walk() { check_for_mutation_in_guard(self, &guard); } } // Third, perform some lints. 
for pat in &arm.pats { check_for_bindings_named_the_same_as_variants(self, pat); } } let module = self.tcx.hir.get_module_parent(scrut.id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut have_errors = false; let inlined_arms : Vec<(Vec<_>, _)> = arms.iter().map(|arm| ( arm.pats.iter().map(|pat| { let mut patcx = PatternContext::new(self.tcx, self.param_env.and(self.identity_substs), self.tables); let pattern = expand_pattern(cx, patcx.lower_pattern(&pat)); if !patcx.errors.is_empty() { patcx.report_inlining_errors(pat.span); have_errors = true; } (pattern, &**pat) }).collect(), arm.guard.as_ref().map(|g| match g { hir::Guard::If(ref e) => &**e, }) )).collect(); // Bail out early if inlining failed. if have_errors { return; } // Fourth, check for unreachable arms. check_arms(cx, &inlined_arms, source); // Then, if the match has no arms, check whether the scrutinee // is uninhabited. let pat_ty = self.tables.node_id_to_type(scrut.hir_id); let module = self.tcx.hir.get_module_parent(scrut.id); if inlined_arms.is_empty() { let scrutinee_is_uninhabited = if self.tcx.features().exhaustive_patterns { self.tcx.is_ty_uninhabited_from(module, pat_ty) } else { self.conservative_is_uninhabited(pat_ty) }; if !scrutinee_is_uninhabited { // We know the type is inhabited, so this must be wrong let mut err = create_e0004(self.tcx.sess, scrut.span, format!("non-exhaustive patterns: type `{}` \ is non-empty", pat_ty)); span_help!(&mut err, scrut.span, "ensure that all possible cases are being handled, \ possibly by adding wildcards or more match arms"); err.emit(); } // If the type *is* uninhabited, it's vacuously exhaustive return; } let matrix: Matrix = inlined_arms .iter() .filter(|&&(_, guard)| guard.is_none()) .flat_map(|arm| &arm.0) .map(|pat| vec![pat.0]) .collect(); let scrut_ty = self.tables.node_id_to_type(scrut.hir_id); check_exhaustive(cx, scrut_ty, scrut.span, &matrix); }) } fn conservative_is_uninhabited(&self, scrutinee_ty: 
Ty<'tcx>) -> bool { // "rustc-1.0-style" uncontentious uninhabitableness check match scrutinee_ty.sty { ty::Never => true, ty::Adt(def, _) => def.variants.is_empty(), _ => false } } fn check_irrefutable(&self, pat: &'tcx Pat, origin: &str) { let module = self.tcx.hir.get_module_parent(pat.id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut patcx = PatternContext::new(self.tcx, self.param_env.and(self.identity_substs), self.tables); let pattern = patcx.lower_pattern(pat); let pattern_ty = pattern.ty; let pats: Matrix = vec![vec![ expand_pattern(cx, pattern) ]].into_iter().collect(); let wild_pattern = Pattern { ty: pattern_ty, span: DUMMY_SP, kind: box PatternKind::Wild, }; let witness = match is_useful(cx, &pats, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(witness) => witness, NotUseful => return, Useful => bug!() }; let pattern_string = witness[0].single_pattern().to_string(); let mut diag = struct_span_err!( self.tcx.sess, pat.span, E0005, "refutable pattern in {}: `{}` not covered", origin, pattern_string ); let label_msg = match pat.node { PatKind::Path(hir::QPath::Resolved(None, ref path)) if path.segments.len() == 1 && path.segments[0].args.is_none() => { format!("interpreted as {} {} pattern, not new variable", path.def.article(), path.def.kind_name()) } _ => format!("pattern `{}` not covered", pattern_string), }; diag.span_label(pat.span, label_msg); diag.emit(); }); } } fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) { pat.walk(|p| { if let PatKind::Binding(_, _, ident, None) = p.node { if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { if bm != ty::BindByValue(hir::MutImmutable) { // Nothing to check. 
return true; } let pat_ty = cx.tables.pat_ty(p); if let ty::Adt(edef, _) = pat_ty.sty { if edef.is_enum() && edef.variants.iter().any(|variant| { variant.name == ident.name && variant.ctor_kind == CtorKind::Const }) { let ty_path = cx.tcx.item_path_str(edef.did); let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, "pattern binding `{}` is named the same as one \ of the variants of the type `{}`", ident, ty_path); err.span_suggestion_with_applicability( p.span, "to match on the variant, qualify the path", format!("{}::{}", ty_path, ident), Applicability::MachineApplicable ); err.emit(); } } } else { cx.tcx.sess.delay_span_bug(p.span, "missing binding mode"); } } true }); } /// Checks for common cases of "catchall" patterns that may not be intended as such. fn pat_is_catchall(pat: &Pat) -> bool { match pat.node { PatKind::Binding(.., None) => true, PatKind::Binding(.., Some(ref s)) => pat_is_catchall(s), PatKind::Ref(ref s, _) => pat_is_catchall(s), PatKind::Tuple(ref v, _) => v.iter().all(|p| { pat_is_catchall(&p) }), _ => false } } // Check for unreachable patterns fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, arms: &[(Vec<(&'a Pattern<'tcx>, &hir::Pat)>, Option<&hir::Expr>)], source: hir::MatchSource) { let mut seen = Matrix::empty(); let mut catchall = None; let mut printed_if_let_err = false; for (arm_index, &(ref pats, guard)) in arms.iter().enumerate() { for &(pat, hir_pat) in pats { let v = vec![pat]; match is_useful(cx, &seen, &v, LeaveOutWitness) { NotUseful => { match source { hir::MatchSource::IfLetDesugar { .. } => { if cx.tcx.features().irrefutable_let_patterns { cx.tcx.lint_node( lint::builtin::IRREFUTABLE_LET_PATTERNS, hir_pat.id, pat.span, "irrefutable if-let pattern"); } else { if printed_if_let_err { // we already printed an irrefutable if-let pattern error. // We don't want two, that's just confusing. 
} else { // find the first arm pattern so we can use its span let &(ref first_arm_pats, _) = &arms[0]; let first_pat = &first_arm_pats[0]; let span = first_pat.0.span; struct_span_err!(cx.tcx.sess, span, E0162, "irrefutable if-let pattern") .span_label(span, "irrefutable pattern") .emit(); printed_if_let_err = true; } } } hir::MatchSource::WhileLetDesugar => { // check which arm we're on. match arm_index { // The arm with the user-specified pattern. 0 => { cx.tcx.lint_node( lint::builtin::UNREACHABLE_PATTERNS, hir_pat.id, pat.span, "unreachable pattern"); }, // The arm with the wildcard pattern. 1 => { if cx.tcx.features().irrefutable_let_patterns { cx.tcx.lint_node( lint::builtin::IRREFUTABLE_LET_PATTERNS, hir_pat.id, pat.span, "irrefutable while-let pattern"); } else { // find the first arm pattern so we can use its span let &(ref first_arm_pats, _) = &arms[0]; let first_pat = &first_arm_pats[0]; let span = first_pat.0.span; struct_span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern") .span_label(span, "irrefutable pattern") .emit(); } }, _ => bug!(), } }, hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => { let mut err = cx.tcx.struct_span_lint_node( lint::builtin::UNREACHABLE_PATTERNS, hir_pat.id, pat.span, "unreachable pattern", ); // if we had a catchall pattern, hint at that if let Some(catchall) = catchall { err.span_label(pat.span, "unreachable pattern"); err.span_label(catchall, "matches any value"); } err.emit(); }, // Unreachable patterns in try expressions occur when one of the arms // are an uninhabited type. Which is OK. 
hir::MatchSource::TryDesugar => {} } } Useful => (), UsefulWithWitness(_) => bug!() } if guard.is_none() { seen.push(v); if catchall.is_none() && pat_is_catchall(hir_pat) { catchall = Some(pat.span); } } } } } fn check_exhaustive<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, scrut_ty: Ty<'tcx>, sp: Span, matrix: &Matrix<'a, 'tcx>) { let wild_pattern = Pattern { ty: scrut_ty, span: DUMMY_SP, kind: box PatternKind::Wild, }; match is_useful(cx, matrix, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(pats) => { let witnesses = if pats.is_empty() { vec![&wild_pattern] } else { pats.iter().map(|w| w.single_pattern()).collect() }; const LIMIT: usize = 3; let joined_patterns = match witnesses.len() { 0 => bug!(), 1 => format!("`{}`", witnesses[0]), 2..=LIMIT => { let (tail, head) = witnesses.split_last().unwrap(); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); format!("`{}` and `{}`", head.join("`, `"), tail) }, _ => { let (head, tail) = witnesses.split_at(LIMIT); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); format!("`{}` and {} more", head.join("`, `"), tail.len()) } }; let label_text = match witnesses.len() { 1 => format!("pattern {} not covered", joined_patterns), _ => format!("patterns {} not covered", joined_patterns) }; create_e0004(cx.tcx.sess, sp, format!("non-exhaustive patterns: {} not covered", joined_patterns)) .span_label(sp, label_text) .emit(); } NotUseful => { // This is good, wildcard pattern isn't reachable }, _ => bug!() } } // Legality of move bindings checking fn check_legality_of_move_bindings(cx: &MatchVisitor, has_guard: bool, pats: &[P<Pat>]) { let mut by_ref_span = None; for pat in pats { pat.each_binding(|_, hir_id, span, _path| { if let Some(&bm) = cx.tables.pat_binding_modes().get(hir_id) { if let ty::BindByReference(..) 
= bm { by_ref_span = Some(span); } } else { cx.tcx.sess.delay_span_bug(pat.span, "missing binding mode"); } }) } let span_vec = &mut Vec::new(); let check_move = |p: &Pat, sub: Option<&Pat>, span_vec: &mut Vec<Span>| { // check legality of moving out of the enum // x @ Foo(..) is legal, but x @ Foo(y) isn't. if sub.map_or(false, |p| p.contains_bindings()) { struct_span_err!(cx.tcx.sess, p.span, E0007, "cannot bind by-move with sub-bindings") .span_label(p.span, "binds an already bound by-move value by moving it") .emit(); } else if has_guard && !cx.tcx.allow_bind_by_move_patterns_with_guards() { let mut err = struct_span_err!(cx.tcx.sess, p.span, E0008, "cannot bind by-move into a pattern guard"); err.span_label(p.span, "moves value into pattern guard"); if cx.tcx.sess.opts.unstable_features.is_nightly_build() && cx.tcx.use_mir_borrowck() { err.help("add #![feature(bind_by_move_pattern_guards)] to the \ crate attributes to enable"); } err.emit(); } else if let Some(_by_ref_span) = by_ref_span { span_vec.push(p.span); } }; for pat in pats { pat.walk(|p| { if let PatKind::Binding(_, _, _, ref sub) = p.node { if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { match bm { ty::BindByValue(..) => { let pat_ty = cx.tables.node_id_to_type(p.hir_id); if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { check_move(p, sub.as_ref().map(|p| &**p), span_vec); } } _ => {} } } else { cx.tcx.sess.delay_span_bug(pat.span, "missing binding mode"); } } true }); } if !span_vec.is_empty(){ let span = MultiSpan::from_spans(span_vec.clone()); let mut err = struct_span_err!( cx.tcx.sess, span, E0009, "cannot bind by-move and by-ref in the same pattern", ); err.span_label(by_ref_span.unwrap(), "both by-ref and by-move used"); for span in span_vec.iter(){ err.span_label(*span, "by-move pattern here"); } err.emit(); } } /// Ensures that a pattern guard doesn't borrow by mutable reference or /// assign. /// /// FIXME: this should be done by borrowck. 
fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Guard) { let mut checker = MutationChecker { cx, }; match guard { hir::Guard::If(expr) => ExprUseVisitor::new(&mut checker, cx.tcx, cx.param_env, cx.region_scope_tree, cx.tables, None).walk_expr(expr), }; } struct MutationChecker<'a, 'tcx: 'a> { cx: &'a MatchVisitor<'a, 'tcx>, } impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { fn matched_pat(&mut self, _: &Pat, _: &cmt_, _: euv::MatchMode) {} fn consume(&mut self, _: ast::NodeId, _: Span, _: &cmt_, _: ConsumeMode) {} fn consume_pat(&mut self, _: &Pat, _: &cmt_, _: ConsumeMode) {} fn borrow(&mut self, _: ast::NodeId, span: Span, _: &cmt_, _: ty::Region<'tcx>, kind:ty:: BorrowKind, _: LoanCause) { match kind { ty::MutBorrow => { let mut err = struct_span_err!(self.cx.tcx.sess, span, E0301, "cannot mutably borrow in a pattern guard"); err.span_label(span, "borrowed mutably in pattern guard"); if self.cx.tcx.sess.opts.unstable_features.is_nightly_build() && self.cx.tcx.use_mir_borrowck() { err.help("add #![feature(bind_by_move_pattern_guards)] to the \ crate attributes to enable"); } err.emit(); } ty::ImmBorrow | ty::UniqueImmBorrow => {} } } fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} fn mutate(&mut self, _: ast::NodeId, span: Span, _: &cmt_, mode: MutateMode) { match mode { MutateMode::JustWrite | MutateMode::WriteAndRead => { struct_span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard") .span_label(span, "assignment in pattern guard") .emit(); } MutateMode::Init => {} } } } /// Forbids bindings in `@` patterns. This is necessary for memory safety, /// because of the way rvalues are handled in the borrow check. (See issue /// #14587.) 
fn check_legality_of_bindings_in_at_patterns(cx: &MatchVisitor, pat: &Pat) { AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat); } struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> { cx: &'a MatchVisitor<'b, 'tcx>, bindings_allowed: bool } impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { NestedVisitorMap::None } fn visit_pat(&mut self, pat: &Pat) { match pat.node { PatKind::Binding(.., ref subpat) => { if !self.bindings_allowed { struct_span_err!(self.cx.tcx.sess, pat.span, E0303, "pattern bindings are not allowed after an `@`") .span_label(pat.span, "not allowed after `@`") .emit(); } if subpat.is_some() { let bindings_were_allowed = self.bindings_allowed; self.bindings_allowed = false; intravisit::walk_pat(self, pat); self.bindings_allowed = bindings_were_allowed; } } _ => intravisit::walk_pat(self, pat), } } }
40.657514
99
0.480398
d550bca00b9d7711158495c2e6fbb19ff5f1fcb4
13,570
//! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library. //! //! # Usage //! //! Begin by creating a `Client` type, usually using the `https` method for a client that supports //! secure connections, and then logging in: //! //! ```no_run //! use ruma_client::Client; //! //! let work = async { //! let homeserver_url = "https://example.com".parse().unwrap(); //! let client = Client::https(homeserver_url, None); //! //! let session = client //! .log_in("@alice:example.com".to_string(), "secret".to_string(), None) //! .await?; //! //! // You're now logged in! Write the session to a file if you want to restore it later. //! // Then start using the API! //! # Result::<(), ruma_client::Error>::Ok(()) //! }; //! ``` //! //! You can also pass an existing session to the `Client` constructor to restore a previous session //! rather than calling `log_in`. //! //! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest //! events), use the `Client::sync`: //! //! ```no_run //! # use futures_util::stream::{StreamExt as _, TryStreamExt as _}; //! # use ruma_client::Client; //! # let homeserver_url = "https://example.com".parse().unwrap(); //! # let client = Client::https(homeserver_url, None); //! # async { //! let mut sync_stream = Box::pin(client.sync(None, None, true)); //! while let Some(response) = sync_stream.try_next().await? { //! // Do something with the data in the response... //! } //! # Result::<(), ruma_client::Error>::Ok(()) //! # }; //! ``` //! //! The `Client` type also provides methods for registering a new account if you don't already have //! one with the given homeserver. //! //! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix //! client-server API via the `api` module. Each leaf module under this tree of modules contains //! the necessary types for one API endpoint. Simply call the module's `call` method, passing it //! 
the logged in `Client` and the relevant `Request` type. `call` will return a future that will //! resolve to the relevant `Response` type. //! //! For example: //! //! ```no_run //! # use ruma_client::Client; //! # let homeserver_url = "https://example.com".parse().unwrap(); //! # let client = Client::https(homeserver_url, None); //! use std::convert::TryFrom; //! //! use ruma_client::api::r0::alias::get_alias; //! use ruma_identifiers::{RoomAliasId, RoomId}; //! //! async { //! let response = client //! .request(get_alias::Request { //! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(), //! }) //! .await?; //! //! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap()); //! # Result::<(), ruma_client::Error>::Ok(()) //! } //! # ; //! ``` #![warn(rust_2018_idioms)] #![deny( missing_copy_implementations, missing_debug_implementations, missing_docs )] use std::{ convert::TryFrom, str::FromStr, sync::{Arc, Mutex}, }; use futures_core::{ future::Future, stream::{Stream, TryStream}, }; use futures_util::stream; use http::Response as HttpResponse; use hyper::{client::HttpConnector, Client as HyperClient, Uri}; #[cfg(feature = "hyper-tls")] use hyper_tls::HttpsConnector; use ruma_api::{Endpoint, Outgoing}; use url::Url; use crate::error::InnerError; pub use crate::{error::Error, session::Session}; pub use ruma_client_api as api; pub use ruma_events as events; pub use ruma_identifiers as identifiers; /// Matrix client-server API endpoints. //pub mod api; mod error; mod session; /// A client for the Matrix client-server API. #[derive(Debug)] pub struct Client<C>(Arc<ClientData<C>>); /// Data contained in Client's Rc #[derive(Debug)] struct ClientData<C> { /// The URL of the homeserver to connect to. homeserver_url: Url, /// The underlying HTTP client. hyper: HyperClient<C>, /// User session data. 
session: Mutex<Option<Session>>, } /// Non-secured variant of the client (using plain HTTP requests) pub type HttpClient = Client<HttpConnector>; impl HttpClient { /// Creates a new client for making HTTP requests to the given homeserver. pub fn new(homeserver_url: Url, session: Option<Session>) -> Self { Self(Arc::new(ClientData { homeserver_url, hyper: HyperClient::builder().keep_alive(true).build_http(), session: Mutex::new(session), })) } /// Get a copy of the current `Session`, if any. /// /// Useful for serializing and persisting the session to be restored later. pub fn session(&self) -> Option<Session> { self.0 .session .lock() .expect("session mutex was poisoned") .clone() } } /// Secured variant of the client (using HTTPS requests) #[cfg(feature = "tls")] pub type HttpsClient = Client<HttpsConnector<HttpConnector>>; #[cfg(feature = "tls")] impl HttpsClient { /// Creates a new client for making HTTPS requests to the given homeserver. pub fn https(homeserver_url: Url, session: Option<Session>) -> Self { let connector = HttpsConnector::new(); Self(Arc::new(ClientData { homeserver_url, hyper: HyperClient::builder().keep_alive(true).build(connector), session: Mutex::new(session), })) } } impl<C> Client<C> where C: hyper::client::connect::Connect + Clone + Send + Sync + 'static, { /// Creates a new client using the given `hyper::Client`. /// /// This allows the user to configure the details of HTTP as desired. pub fn custom( hyper_client: HyperClient<C>, homeserver_url: Url, session: Option<Session>, ) -> Self { Self(Arc::new(ClientData { homeserver_url, hyper: hyper_client, session: Mutex::new(session), })) } /// Log in with a username and password. /// /// In contrast to `api::r0::session::login::call()`, this method stores the /// session data returned by the endpoint in this client, instead of /// returning it. 
pub async fn log_in( &self, user: String, password: String, device_id: Option<String>, ) -> Result<Session, Error> { use api::r0::session::login; let response = self .request(login::Request { address: None, login_type: login::LoginType::Password, medium: None, device_id, password, user, }) .await?; let session = Session { access_token: response.access_token, device_id: response.device_id, user_id: response.user_id, }; *self.0.session.lock().unwrap() = Some(session.clone()); Ok(session) } /// Register as a guest. In contrast to `api::r0::account::register::call()`, /// this method stores the session data returned by the endpoint in this /// client, instead of returning it. pub async fn register_guest(&self) -> Result<Session, Error> { use api::r0::account::register; let response = self .request(register::Request { auth: None, bind_email: None, device_id: None, initial_device_display_name: None, kind: Some(register::RegistrationKind::Guest), password: None, username: None, }) .await?; let session = Session { access_token: response.access_token, device_id: response.device_id, user_id: response.user_id, }; *self.0.session.lock().unwrap() = Some(session.clone()); Ok(session) } /// Register as a new user on this server. /// /// In contrast to `api::r0::account::register::call()`, this method stores /// the session data returned by the endpoint in this client, instead of /// returning it. /// /// The username is the local part of the returned user_id. If it is /// omitted from this request, the server will generate one. 
pub async fn register_user( &self, username: Option<String>, password: String, ) -> Result<Session, Error> { use api::r0::account::register; let response = self .request(register::Request { auth: None, bind_email: None, device_id: None, initial_device_display_name: None, kind: Some(register::RegistrationKind::User), password: Some(password), username, }) .await?; let session = Session { access_token: response.access_token, device_id: response.device_id, user_id: response.user_id, }; *self.0.session.lock().unwrap() = Some(session.clone()); Ok(session) } /// Convenience method that represents repeated calls to the sync_events endpoint as a stream. /// /// If the since parameter is None, the first Item might take a significant time to arrive and /// be deserialized, because it contains all events that have occurred in the whole lifetime of /// the logged-in users account and are visible to them. pub fn sync( &self, filter: Option<api::r0::sync::sync_events::Filter>, since: Option<String>, set_presence: bool, ) -> impl Stream<Item = Result<api::r0::sync::sync_events::IncomingResponse, Error>> + TryStream<Ok = api::r0::sync::sync_events::IncomingResponse, Error = Error> { use api::r0::sync::sync_events; // TODO: Is this really the way TryStreams are supposed to work? 
#[derive(Debug, PartialEq, Eq)] enum State { InitialSync, Since(String), Errored, } let client = self.clone(); let set_presence = if set_presence { None } else { Some(sync_events::SetPresence::Offline) }; let initial_state = match since { Some(s) => State::Since(s), None => State::InitialSync, }; stream::unfold(initial_state, move |state| { let client = client.clone(); let filter = filter.clone(); async move { let since = match state { State::Errored => return None, State::Since(s) => Some(s), State::InitialSync => None, }; let res = client .request(sync_events::Request { filter, since, full_state: None, set_presence, timeout: None, }) .await; match res { Ok(response) => { let next_batch_clone = response.next_batch.clone(); Some((Ok(response), State::Since(next_batch_clone))) } Err(e) => Some((Err(e), State::Errored)), } } }) } /// Makes a request to a Matrix API endpoint. pub fn request<Request: Endpoint>( &self, request: Request, ) -> impl Future<Output = Result<<Request::Response as Outgoing>::Incoming, Error>> // We need to duplicate Endpoint's where clauses because the compiler is not smart enough yet. 
// See https://github.com/rust-lang/rust/issues/54149 where Request::Incoming: TryFrom<http::Request<Vec<u8>>, Error = ruma_api::Error>, <Request::Response as Outgoing>::Incoming: TryFrom<http::Response<Vec<u8>>, Error = ruma_api::Error>, { let client = self.0.clone(); let mut url = client.homeserver_url.clone(); async move { let mut hyper_request = request.try_into()?.map(hyper::Body::from); { let uri = hyper_request.uri(); url.set_path(uri.path()); url.set_query(uri.query()); if Request::METADATA.requires_authentication { if let Some(ref session) = *client.session.lock().unwrap() { url.query_pairs_mut() .append_pair("access_token", &session.access_token); } else { return Err(Error(InnerError::AuthenticationRequired)); } } } *hyper_request.uri_mut() = Uri::from_str(url.as_ref())?; let hyper_response = client.hyper.request(hyper_request).await?; let (head, body) = hyper_response.into_parts(); // FIXME: We read the response into a contiguous buffer here (not actually required for // deserialization) and then copy the whole thing to convert from Bytes to Vec<u8>. let full_body = hyper::body::to_bytes(body).await?; let full_response = HttpResponse::from_parts(head, full_body.as_ref().to_owned()); Ok(<Request::Response as Outgoing>::Incoming::try_from( full_response, )?) } } } impl<C> Clone for Client<C> { fn clone(&self) -> Self { Self(self.0.clone()) } }
32.777778
99
0.575534
eb3b722a18b573e6313f4d9dab95385270b4daaf
3,004
//! //! mapping.map support was intentionally removed. I'd like to see if we can //! do it without it. If not, I'll put it back. But less stuff we have, more //! better it is. //! use std::collections::HashMap; use serde_derive::Deserialize; use serde_json::Value; use crate::deref::OptionDeref; pub use self::{ filename::FileName, target::{LocationPartition, RawTarget, Target, TargetFormat, TargetLocation, TargetType}, }; mod filename; mod target; /// Mapping structure #[derive(Debug, Deserialize)] pub struct Mapping { #[serde(default, skip_serializing_if = "HashMap::is_empty")] targets: HashMap<String, RawTarget>, #[serde(default, skip_serializing_if = "Option::is_none")] target: Option<Target>, #[serde(default, skip_serializing_if = "Option::is_none")] filename: Option<FileName>, #[serde(default, skip_serializing_if = "Option::is_none")] path: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] template: Option<Value>, } impl Mapping { pub fn targets(&self) -> &HashMap<String, RawTarget> { &self.targets } pub fn target(&self) -> Option<&Target> { self.target.as_ref() } pub fn filename(&self) -> Option<&FileName> { self.filename.as_ref() } pub fn path(&self) -> Option<&str> { self.path.as_deref() } pub fn template(&self) -> Option<&Value> { self.template.as_ref() } } #[cfg(test)] mod tests { use super::*; #[test] fn path() { let m: Mapping = serde_yaml::from_str("path: /config.json").unwrap(); assert_eq!(m.path(), Some("/config.json")); } #[test] fn filename() { let m: Mapping = serde_yaml::from_str("filename: foo.txt").unwrap(); assert_eq!(m.filename().unwrap().name(), Some("foo.txt")); } #[test] fn target_as_ref() { // Other target options are tested inside target.rs let m: Mapping = serde_yaml::from_str("target: config_json").unwrap(); assert_eq!(m.target().unwrap().reference(), Some("config_json")); } #[test] fn template() { let schema = r#" template: string "#; // Other target options are tested inside target.rs let m: Mapping = 
serde_yaml::from_str(schema).unwrap(); assert_eq!(m.template(), Some(&Value::String("string".to_string()))); } #[test] fn targets() { let schema = r#" targets: config_json: type: file format: json location: partition: resin-boot path: /config.json "#; let m: Mapping = serde_yaml::from_str(schema).unwrap(); let t = &m.targets["config_json"]; assert_eq!(t.type_(), &TargetType::File); assert_eq!(t.format(), &TargetFormat::Json); assert_eq!(t.location().partition().label(), Some("resin-boot")); assert_eq!(t.location().path(), "/config.json"); } }
25.896552
93
0.594541
ffc6e404b5e3c46dd959042cb475711c58d3357e
18,190
//! Support for capturing a stack backtrace of an OS thread //! //! This module contains the support necessary to capture a stack backtrace of a //! running OS thread from the OS thread itself. The `Backtrace` type supports //! capturing a stack trace via the `Backtrace::capture` and //! `Backtrace::force_capture` functions. //! //! A backtrace is typically quite handy to attach to errors (e.g. types //! implementing `std::error::Error`) to get a causal chain of where an error //! was generated. //! //! > **Note**: this module is unstable and is designed in [RFC 2504], and you //! > can learn more about its status in the [tracking issue]. //! //! [RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md //! [tracking issue]: https://github.com/rust-lang/rust/issues/53487 //! //! ## Accuracy //! //! Backtraces are attempted to be as accurate as possible, but no guarantees //! are provided about the exact accuracy of a backtrace. Instruction pointers, //! symbol names, filenames, line numbers, etc, may all be incorrect when //! reported. Accuracy is attempted on a best-effort basis, however, and bugs //! are always welcome to indicate areas of improvement! //! //! For most platforms a backtrace with a filename/line number requires that //! programs be compiled with debug information. Without debug information //! filenames/line numbers will not be reported. //! //! ## Platform support //! //! Not all platforms that libstd compiles for support capturing backtraces. //! Some platforms simply do nothing when capturing a backtrace. To check //! whether the platform supports capturing backtraces you can consult the //! `BacktraceStatus` enum as a result of `Backtrace::status`. //! //! Like above with accuracy platform support is done on a best effort basis. //! Sometimes libraries may not be available at runtime or something may go //! wrong which would cause a backtrace to not be captured. Please feel free to //! 
report issues with platforms where a backtrace cannot be captured though! //! //! ## Environment Variables //! //! The `Backtrace::capture` function may not actually capture a backtrace by //! default. Its behavior is governed by two environment variables: //! //! * `RUST_LIB_BACKTRACE` - if this is set to `0` then `Backtrace::capture` //! will never capture a backtrace. Any other value this is set to will enable //! `Backtrace::capture`. //! //! * `RUST_BACKTRACE` - if `RUST_LIB_BACKTRACE` is not set, then this variable //! is consulted with the same rules of `RUST_LIB_BACKTRACE`. //! //! * If neither of the above env vars are set, then `Backtrace::capture` will //! be disabled. //! //! Capturing a backtrace can be a quite expensive runtime operation, so the //! environment variables allow either forcibly disabling this runtime //! performance hit or allow selectively enabling it in some programs. //! //! Note that the `Backtrace::force_capture` function can be used to ignore //! these environment variables. Also note that the state of environment //! variables is cached once the first backtrace is created, so altering //! `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` at runtime may not actually change //! how backtraces are captured. #![unstable(feature = "backtrace", issue = "53487")] // NB: A note on resolution of a backtrace: // // Backtraces primarily happen in two steps, one is where we actually capture // the stack backtrace, giving us a list of instruction pointers corresponding // to stack frames. Next we take these instruction pointers and, one-by-one, // turn them into a human readable name (like `main`). // // The first phase can be somewhat expensive (walking the stack), especially // on MSVC where debug information is consulted to return inline frames each as // their own frame. The second phase, however, is almost always extremely // expensive (on the order of milliseconds sometimes) when it's consulting debug // information. 
// // We attempt to amortize this cost as much as possible by delaying resolution // of an address to a human readable name for as long as possible. When // `Backtrace::create` is called to capture a backtrace it doesn't actually // perform any symbol resolution, but rather we lazily resolve symbols only just // before they're needed for printing. This way we can make capturing a // backtrace and throwing it away much cheaper, but actually printing a // backtrace is still basically the same cost. // // This strategy comes at the cost of some synchronization required inside of a // `Backtrace`, but that's a relatively small price to pay relative to capturing // a backtrace or actually symbolizing it. use crate::env; use crate::ffi::c_void; use crate::fmt; use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use crate::sync::Mutex; use crate::sys_common::backtrace::{lock, output_filename}; use crate::vec::Vec; use backtrace::BytesOrWideString; use backtrace_rs as backtrace; /// A captured OS thread stack backtrace. /// /// This type represents a stack backtrace for an OS thread captured at a /// previous point in time. In some instances the `Backtrace` type may /// internally be empty due to configuration. For more information see /// `Backtrace::capture`. pub struct Backtrace { inner: Inner, } /// The current status of a backtrace, indicating whether it was captured or /// whether it is empty for some other reason. #[non_exhaustive] #[derive(Debug, PartialEq, Eq)] pub enum BacktraceStatus { /// Capturing a backtrace is not supported, likely because it's not /// implemented for the current platform. Unsupported, /// Capturing a backtrace has been disabled through either the /// `RUST_LIB_BACKTRACE` or `RUST_BACKTRACE` environment variables. Disabled, /// A backtrace has been captured and the `Backtrace` should print /// reasonable information when rendered. 
Captured, } enum Inner { Unsupported, Disabled, Captured(Mutex<Capture>), } struct Capture { actual_start: usize, resolved: bool, frames: Vec<BacktraceFrame>, } fn _assert_send_sync() { fn _assert<T: Send + Sync>() {} _assert::<Backtrace>(); } struct BacktraceFrame { frame: RawFrame, symbols: Vec<BacktraceSymbol>, } enum RawFrame { Actual(backtrace::Frame), #[cfg(test)] Fake, } struct BacktraceSymbol { name: Option<Vec<u8>>, filename: Option<BytesOrWide>, lineno: Option<u32>, } enum BytesOrWide { Bytes(Vec<u8>), Wide(Vec<u16>), } impl fmt::Debug for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut capture = match &self.inner { Inner::Unsupported => return fmt.write_str("<unsupported>"), Inner::Disabled => return fmt.write_str("<disabled>"), Inner::Captured(c) => c.lock().unwrap(), }; capture.resolve(); let frames = &capture.frames[capture.actual_start..]; write!(fmt, "Backtrace ")?; let mut dbg = fmt.debug_list(); for frame in frames { if frame.frame.ip().is_null() { continue; } dbg.entries(&frame.symbols); } dbg.finish() } } impl fmt::Debug for BacktraceSymbol { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{{ ")?; if let Some(fn_name) = self.name.as_ref().map(|b| backtrace::SymbolName::new(b)) { write!(fmt, "fn: \"{:#}\"", fn_name)?; } else { write!(fmt, "fn: <unknown>")?; } if let Some(fname) = self.filename.as_ref() { write!(fmt, ", file: \"{:?}\"", fname)?; } if let Some(line) = self.lineno.as_ref() { write!(fmt, ", line: {:?}", line)?; } write!(fmt, " }}") } } impl fmt::Debug for BytesOrWide { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { output_filename( fmt, match self { BytesOrWide::Bytes(w) => BytesOrWideString::Bytes(w), BytesOrWide::Wide(w) => BytesOrWideString::Wide(w), }, backtrace::PrintFmt::Short, None, ) } } impl Backtrace { /// Returns whether backtrace captures are enabled through environment /// variables. 
fn enabled() -> bool { // Cache the result of reading the environment variables to make // backtrace captures speedy, because otherwise reading environment // variables every time can be somewhat slow. static ENABLED: AtomicUsize = AtomicUsize::new(0); match ENABLED.load(SeqCst) { 0 => {} 1 => return false, _ => return true, } /* TODO: Figure out something to rpelace env, maybe compile time hardocded vals? let enabled = match env::var("RUST_LIB_BACKTRACE") { Ok(s) => s != "0", Err(_) => match env::var("RUST_BACKTRACE") { Ok(s) => s != "0", Err(_) => false, }, }; */ let enabled = true; ENABLED.store(enabled as usize + 1, SeqCst); enabled } /// Capture a stack backtrace of the current thread. /// /// This function will capture a stack backtrace of the current OS thread of /// execution, returning a `Backtrace` type which can be later used to print /// the entire stack trace or render it to a string. /// /// This function will be a noop if the `RUST_BACKTRACE` or /// `RUST_LIB_BACKTRACE` backtrace variables are both not set. If either /// environment variable is set and enabled then this function will actually /// capture a backtrace. Capturing a backtrace can be both memory intensive /// and slow, so these environment variables allow liberally using /// `Backtrace::capture` and only incurring a slowdown when the environment /// variables are set. /// /// To forcibly capture a backtrace regardless of environment variables, use /// the `Backtrace::force_capture` function. #[inline(never)] // want to make sure there's a frame here to remove pub fn capture() -> Backtrace { if !Backtrace::enabled() { return Backtrace { inner: Inner::Disabled }; } Backtrace::create(Backtrace::capture as usize) } /// Forcibly captures a full backtrace, regardless of environment variable /// configuration. 
/// /// This function behaves the same as `capture` except that it ignores the /// values of the `RUST_BACKTRACE` and `RUST_LIB_BACKTRACE` environment /// variables, always capturing a backtrace. /// /// Note that capturing a backtrace can be an expensive operation on some /// platforms, so this should be used with caution in performance-sensitive /// parts of code. #[inline(never)] // want to make sure there's a frame here to remove pub fn force_capture() -> Backtrace { Backtrace::create(Backtrace::force_capture as usize) } // Capture a backtrace which start just before the function addressed by // `ip` fn create(ip: usize) -> Backtrace { let _lock = lock(); let mut frames = Vec::new(); let mut actual_start = None; unsafe { backtrace::trace_unsynchronized(|frame| { frames.push(BacktraceFrame { frame: RawFrame::Actual(frame.clone()), symbols: Vec::new(), }); if frame.symbol_address() as usize == ip && actual_start.is_none() { actual_start = Some(frames.len()); } true }); } // If no frames came out assume that this is an unsupported platform // since `backtrace` doesn't provide a way of learning this right now, // and this should be a good enough approximation. let inner = if frames.is_empty() { Inner::Unsupported } else { Inner::Captured(Mutex::new(Capture { actual_start: actual_start.unwrap_or(0), frames, resolved: false, })) }; Backtrace { inner } } /// Returns the status of this backtrace, indicating whether this backtrace /// request was unsupported, disabled, or a stack trace was actually /// captured. 
pub fn status(&self) -> BacktraceStatus { match self.inner { Inner::Unsupported => BacktraceStatus::Unsupported, Inner::Disabled => BacktraceStatus::Disabled, Inner::Captured(_) => BacktraceStatus::Captured, } } } impl fmt::Display for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut capture = match &self.inner { Inner::Unsupported => return fmt.write_str("unsupported backtrace"), Inner::Disabled => return fmt.write_str("disabled backtrace"), Inner::Captured(c) => c.lock().unwrap(), }; capture.resolve(); let full = fmt.alternate(); let (frames, style) = if full { (&capture.frames[..], backtrace::PrintFmt::Full) } else { (&capture.frames[capture.actual_start..], backtrace::PrintFmt::Short) }; // When printing paths we try to strip the cwd if it exists, otherwise // we just print the path as-is. Note that we also only do this for the // short format, because if it's full we presumably want to print // everything. let mut print_path = move |fmt: &mut fmt::Formatter<'_>, path: BytesOrWideString<'_>| { output_filename(fmt, path, style, None) }; let mut f = backtrace::BacktraceFmt::new(fmt, style, &mut print_path); f.add_context()?; for frame in frames { let mut f = f.frame(); if frame.symbols.is_empty() { f.print_raw(frame.frame.ip(), None, None, None)?; } else { for symbol in frame.symbols.iter() { f.print_raw( frame.frame.ip(), symbol.name.as_ref().map(|b| backtrace::SymbolName::new(b)), symbol.filename.as_ref().map(|b| match b { BytesOrWide::Bytes(w) => BytesOrWideString::Bytes(w), BytesOrWide::Wide(w) => BytesOrWideString::Wide(w), }), symbol.lineno, )?; } } } f.finish()?; Ok(()) } } impl Capture { fn resolve(&mut self) { // If we're already resolved, nothing to do! if self.resolved { return; } self.resolved = true; // Use the global backtrace lock to synchronize this as it's a // requirement of the `backtrace` crate, and then actually resolve // everything. 
let _lock = lock(); for frame in self.frames.iter_mut() { let symbols = &mut frame.symbols; let frame = match &frame.frame { RawFrame::Actual(frame) => frame, #[cfg(test)] RawFrame::Fake => unimplemented!(), }; unsafe { backtrace::resolve_frame_unsynchronized(frame, |symbol| { symbols.push(BacktraceSymbol { name: symbol.name().map(|m| m.as_bytes().to_vec()), filename: symbol.filename_raw().map(|b| match b { BytesOrWideString::Bytes(b) => BytesOrWide::Bytes(b.to_owned()), BytesOrWideString::Wide(b) => BytesOrWide::Wide(b.to_owned()), }), lineno: symbol.lineno(), }); }); } } } } impl RawFrame { fn ip(&self) -> *mut c_void { match self { RawFrame::Actual(frame) => frame.ip(), #[cfg(test)] RawFrame::Fake => 1 as *mut c_void, } } } #[test] fn test_debug() { let backtrace = Backtrace { inner: Inner::Captured(Mutex::new(Capture { actual_start: 1, resolved: true, frames: vec![ BacktraceFrame { frame: RawFrame::Fake, symbols: vec![BacktraceSymbol { name: Some(b"std::backtrace::Backtrace::create".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/backtrace.rs".to_vec())), lineno: Some(100), }], }, BacktraceFrame { frame: RawFrame::Fake, symbols: vec![BacktraceSymbol { name: Some(b"__rust_maybe_catch_panic".to_vec()), filename: None, lineno: None, }], }, BacktraceFrame { frame: RawFrame::Fake, symbols: vec![ BacktraceSymbol { name: Some(b"std::rt::lang_start_internal".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())), lineno: Some(300), }, BacktraceSymbol { name: Some(b"std::rt::lang_start".to_vec()), filename: Some(BytesOrWide::Bytes(b"rust/rt.rs".to_vec())), lineno: Some(400), }, ], }, ], })), }; #[rustfmt::skip] let expected = "Backtrace [\ \n { fn: \"__rust_maybe_catch_panic\" },\ \n { fn: \"std::rt::lang_start_internal\", file: \"rust/rt.rs\", line: 300 },\ \n { fn: \"std::rt::lang_start\", file: \"rust/rt.rs\", line: 400 },\ \n]"; assert_eq!(format!("{:#?}", backtrace), expected); }
37.122449
95
0.592303
8a2dbe35bf474f7ccd13788a211084fae2b388f6
957
use crate::prelude::*; /// Shared wrap the Observable, subscribe and accept subscribe in a safe mode /// by SharedObservable. #[derive(Clone)] pub struct Shared<R>(pub(crate) R); pub trait SharedObservable: Observable { type Unsub: SubscriptionLike + Sync + Send + 'static; fn actual_subscribe<O>(self, observer: O) -> Self::Unsub where O: Observer<Item = Self::Item, Err = Self::Err> + Sync + Send + 'static; /// Convert to a thread-safe mode. #[inline] fn into_shared(self) -> Shared<Self> where Self: Sized, { Shared(self) } } impl<S: Observable> Observable for Shared<S> { type Item = S::Item; type Err = S::Err; } impl<S> SharedObservable for Shared<S> where S: SharedObservable, { type Unsub = S::Unsub; #[inline] fn actual_subscribe<O>(self, observer: O) -> Self::Unsub where O: Observer<Item = Self::Item, Err = Self::Err> + Sync + Send + 'static, { self.0.actual_subscribe(observer) } }
22.785714
77
0.656217
69cab8450d8cc81cb5e08cd89355ef1c6762e150
8,965
#![feature(seek_stream_len)] mod fio; use std::io::{Seek, SeekFrom, Write, ErrorKind, Read}; use std::path::{Path, PathBuf}; use std::fs::{OpenOptions}; use std::time::SystemTime; use serde::{Serialize, Deserialize}; use std::borrow::{Borrow, BorrowMut}; use crate::fio::MetadataSpace; type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>; static mut TABLE_LOCATION: Option<u64> = None; const METADATA_SPACE_SIZE: u64 = 1000; const DEFAULT_ACCESSES_PER_SHIFT: u64 = 500; const MAGIC_IDENTIFIER: u64 = 0x8d2765dd2bc8bf74; ///Proof of concept demonstration of STFS (Shifting table filesystem) #[derive(Serialize, Deserialize)] struct STFSFileMetadata { _start: u64, _len: u64, _flags: u16, _modified: SystemTime, _accessed: SystemTime, _created: SystemTime, _path: PathBuf, } #[derive(Serialize, Deserialize)] struct ShiftingTable { ///If this is set to true, then reads will decrement the _accesses_left field (i.e. reads will contribute towards a shift) _shift_on_read: bool, ///If this is set to true, then writes will decrement the _accesses_left field (i.e. writes will contribute towards a shift) _shift_on_write: bool, ///This value keeps track of how many table read/writes are allowed before a shift is initiated _accesses_left: u64, ///Maximum number of read/writes to the table before a shift is initiated _accesses_per_shift: u64, ///Size, in sectors, of the table _table_size: u64, ///Magic constant to verify table. 
Not sure if this is really needed _magic: u64, ///Metadata for the one file on the filesystem _files_data: Vec<STFSFileMetadata>, } impl ShiftingTable { fn new() -> Self { let mut s = Self { _shift_on_read: true, _shift_on_write: true, _accesses_left: DEFAULT_ACCESSES_PER_SHIFT, _accesses_per_shift: DEFAULT_ACCESSES_PER_SHIFT, _table_size: 0, _magic: MAGIC_IDENTIFIER, _files_data: Vec::new(), }; s.set_table_size(); s } fn set_table_size(& mut self) { self._table_size = (bincode::serialize(&self).unwrap().len() as f64 / 496.0f64).ceil() as u64 } } ///We wrap the following functions as multithreaded acces is not a problem in this project fn set_table_location(location: u64) { unsafe { TABLE_LOCATION = Some(location); } } fn get_table_location() -> u64 { unsafe { TABLE_LOCATION.unwrap() } } /// Create a fake storage media as a file fn create<P: AsRef<Path>>(media: P, size: u64) -> Result<()> { let fp = OpenOptions::new() .write(true) .create(true) .truncate(true) .open(media)?; fp.set_len(size)?; Ok(()) } /// Write the STFS table to a specific sector fn write_table<S: Read + Write + Seek>(stream: S, location: u64, table: & ShiftingTable) -> Result<()> { let writer = MetadataSpace::new(stream, location); bincode::serialize_into(writer, table)?; Ok(()) } fn read_table<S: Read + Write + Seek>(stream: S, location: u64) -> Result<ShiftingTable> { let reader = MetadataSpace::new(stream, location); Ok(bincode::deserialize_from(reader)?) } /// Format the media as STFS fn format<S: Read + Write + Seek>(mut stream: S, metadata_space_size: u64) -> Result<()> { let sector_count = stream.stream_len()? 
/ 512; if METADATA_SPACE_SIZE >= sector_count { return Err(Box::new(std::io::Error::new(ErrorKind::Other, "Storage media is too small"))); } //Before we create a table, we must lay an initial trail across the entire metadata space // Putting down an initial trail allows the binary search algorithm to find the table // even if the table hasn't yet done a complete pass of the metadata space //Iterate over all sectors in metadata space and lay the initial trail which goes from 0 to METADATA_SPACE_SIZE-1 for i in 0..metadata_space_size { let step_number_start = i * 512 + 496; stream.seek(SeekFrom::Start(step_number_start))?; bincode::serialize_into(& mut stream, &(i as u128))?; } // Writing the table to the beginning, we must include the new trail at METADATA_SPACE_SIZE. // So by this point, say the METADATA_SPACE_SIZE is 10. We should have a trail that looks like // 10 1 2 3 4 5 6 7 8 9 stream.seek(SeekFrom::Start(496))?; bincode::serialize_into(& mut stream, &(METADATA_SPACE_SIZE as u128))?; let table = ShiftingTable::new(); write_table(stream, 0, & table)?; Ok(()) } /// Search for the table fn search<S: Read + Write + Seek>(mut stream: S) -> Result<u64> { let mut start = 0; let mut end = METADATA_SPACE_SIZE-1; loop { stream.seek(SeekFrom::Start(start * 512 + 496))?; let start_step: u128 = bincode::deserialize_from(& mut stream)?; let middle = ((start + end) as f64 / 2.0f64).floor() as u64; stream.seek(SeekFrom::Start(middle * 512 + 496))?; let middle_step: u128 = bincode::deserialize_from(& mut stream)?; if start_step > middle_step { end = middle; } else if start_step < middle_step { start = middle; } else { return Ok(start); } } } ///Equivalent to the FUSE init function fn initialise<S: Read + Write + Seek>(stream: S) -> Result<()> { //When the FS is mounted, we manually search for the table then store the location in memory. 
//When we shift the table, se keep track of this in memory to avoid having to search each time set_table_location(search(stream)?); Ok(()) } ///Shift the table by one sector, updating metadata and leaving behind a trail. (sectors left behind contain zeros in the first 496 bytes fn shift_table<S: Read + Write + Seek>(mut stream: S) -> Result<()> { //Get location let table_location = get_table_location(); //Read the table let table = read_table(stream.borrow_mut(), table_location)?; //Check the number of sectors left after the table. // If the number of sectors left is less than or equal to the size of the table, // Then fill out the trail to the enc of the storage, then wrap around to the beginning stream.seek(SeekFrom::Start(table_location * 512))?; stream.write(&[0u8; 496])?; let mut current_trail: u128 = bincode::deserialize_from(& mut stream)?; let (new_location, last_trail) = if table_location + table._table_size >= METADATA_SPACE_SIZE - 1 { for i in table_location+1..METADATA_SPACE_SIZE { current_trail += 1; stream.seek(SeekFrom::Start( i * 512))?; stream.write(&[0u8; 496])?; bincode::serialize_into(& mut stream, &(current_trail as u128))?; } (0, current_trail+1) } else { (table_location+1, current_trail+1) }; //Write the table to the next position write_table(stream.borrow_mut(), new_location, &table)?; //Leave behind a trail in the previous sector (make sure the rest of the sector is cleared too stream.seek(SeekFrom::Start(new_location * 512 + 496))?; bincode::serialize_into(& mut stream, &last_trail)?; //Update the TABLE_LOCATION variable set_table_location(new_location); Ok(()) } ///Called whenever the table is read or written to. 
Decrements the _accesses_left field and may initiate a shift fn access<S: Read + Write + Seek>(mut stream: S) -> Result<()> { //Load the table let mut table = read_table(stream.borrow_mut(), get_table_location())?; //Decrement accesses left table._accesses_left -= 1; //Shift the table forward & reset the count if table._accesses_left == 0 { shift_table(stream.borrow_mut())?; table._accesses_left = table._accesses_per_shift; } //Write the new table write_table(stream.borrow_mut(), get_table_location(), &table)?; Ok(()) } ///Used to shift-specific metadata fn edit_table<S: Read + Write + Seek>(mut stream: S, read_shift: Option<bool>, write_shift: Option<bool>, accesses_per_shift: Option<u64>) -> Result<()> { let mut table = read_table(stream, get_table_location())?; if let Some(read) = read_shift { table._shift_on_read = read; } if let Some(write) = write_shift { table._shift_on_write = write; } if let Some(accesses) = accesses_per_shift { table._accesses_per_shift = accesses; } Ok(()) } fn main() { let media_path = "storage/test"; create(media_path.borrow(), 512 * 10000).unwrap(); let mut fp = OpenOptions::new() .read(true) .write(true) .open(media_path.borrow()).unwrap(); format(fp.borrow_mut(), METADATA_SPACE_SIZE).unwrap(); initialise(fp.borrow_mut()).unwrap(); for _ in 0..500*9 { access(fp.borrow_mut()).unwrap(); } println!("Search: {}", get_table_location()); }
28.826367
154
0.653988
cca451ac794a170d08786749dfe1421995ef3f74
3,191
use eos::{ gregorian::{MAX_EPOCH_DAYS, MIN_EPOCH_DAYS}, DateTime, TimeZone, Utc, }; /// A naive Unix timestamp. /// /// A naive timestamp is defined by the number of seconds since the Unix epoch, /// defined as January 1st 1970 12:00 AM UTC. This does *not* have nanosecond precision. /// /// Naive timestamps have no notion of timezone.They are generally not used except when /// dealing with [`TimeZone`] calculations. /// /// To convert a [`DateTime`] into a [`NaiveTimestamp`], the [`From`] trait should be used. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[repr(transparent)] pub(crate) struct NaiveTimestamp(pub(crate) i64); impl<Tz> From<DateTime<Tz>> for NaiveTimestamp where Tz: TimeZone, { fn from(dt: DateTime<Tz>) -> Self { let ts = dt.days_since_epoch() as i64 * 86400 + dt.hour() as i64 * 3600 + dt.minute() as i64 * 60 + dt.second() as i64; Self(ts) } } impl From<i64> for NaiveTimestamp { fn from(s: i64) -> Self { Self(s) } } impl NaiveTimestamp { /// The minimum valid number of seconds pub(crate) const MIN_VALID: i64 = MIN_EPOCH_DAYS as i64 * 86400; pub(crate) const MAX_VALID: i64 = MAX_EPOCH_DAYS as i64 * 86400 + (23 * 3600) + (59 * 60) + 59; /// Creates a new [`NaiveTimestamp`] from the given date and time. pub(crate) const fn new(date: &eos::Date, time: &eos::Time) -> Self { let ts = date.days_since_epoch() as i64 * 86400 + time.hour() as i64 * 3600 + time.minute() as i64 * 60 + time.second() as i64; Self(ts) } /// Creates a new [`NaiveTimestamp`] from the given number of seconds. pub(crate) const fn from_seconds(secs: i64) -> Self { Self(secs) } /// Returns the inner value. These are the number of seconds. pub(crate) const fn into_inner(self) -> i64 { self.0 } /// Converts the naive timestamp into a UTC [`DateTime`]. 
pub(crate) fn to_utc(self) -> DateTime<Utc> { eos::Timestamp::from_seconds(self.0).to_utc() } // Turns a NaiveTimestamp into a eos::Timestamp from a UtcOffset pub(crate) fn to_regular(self, offset: &eos::UtcOffset) -> eos::Timestamp { eos::Timestamp::from_seconds(self.0 - offset.total_seconds() as i64) } } impl std::fmt::Debug for NaiveTimestamp { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Give the NaiveTimestamp some more important debugging information // such as the UTC time if self.0 >= Self::MIN_VALID && self.0 <= Self::MAX_VALID { write!(f, "NaiveTimestamp({}, \"{}\")", &self.0, self.to_utc()) } else { write!(f, "NaiveTimestamp({})", &self.0) } } } impl From<eos::Timestamp> for NaiveTimestamp { #[inline] fn from(ts: eos::Timestamp) -> Self { Self(ts.as_seconds()) } } #[cfg(test)] mod tests { use super::*; use eos::datetime; #[test] fn test_to_utc() { let dt = datetime!(2021-01-12 12:34 -05:00); assert_eq!(NaiveTimestamp::from(dt).to_utc(), datetime!(2021-01-12 12:34)); } }
30.682692
99
0.605766
9b653e3b6e4f3c26b3034184ca5bbb2f72c13ff0
2,813
use crate::day_tasks; use std::collections::HashSet; use num_integer::gcd; use std::f64::consts::PI; pub struct Day10; impl day_tasks::DayTasks for Day10 { fn day_number (&self) -> String { "10".to_string() } fn task_0 (&self, input: &String) -> String { let asteroid_points = get_asteroid_points(input); let mut max = 0; let mut max_position = (-1, -1); for position in &asteroid_points { let count = count_sightable_asteroids(*position, &asteroid_points); if max < count { max = count; max_position = *position; } } format!("{} coordinates = {:?}", max, max_position) } fn task_1 (&self, input: &String) -> String { let asteroid_points = get_asteroid_points(input); let set = get_visible_aseteroids((8, 16), &asteroid_points); let mut sorted = set .into_iter() .map(|(x, y)| { let angle = (-(y as f64) / ((x * x + y * y) as f64).sqrt()).acos(); let angle = if x < 0 { 2.0 * PI - angle } else { angle }; (angle, x, y) }) .collect::<Vec<(f64, i32, i32)>>(); sorted.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); sorted .into_iter() .nth(199) .map(|(_, x, y)| { let (x, y) = (x + 8, y + 16); (x * 100 + y).to_string()}) .unwrap_or("- Something went wrong -".to_string()) } } fn get_asteroid_points (input: &String) -> Vec<(i32, i32)> { input .lines() .enumerate() .flat_map(|(y, line)| line .chars() .enumerate() .filter_map(move |(x, c)| if c == '#' { Some((x as i32, y as i32)) } else { None })) .collect() } fn count_sightable_asteroids (position: (i32, i32), all_asteroid_positions: &Vec<(i32, i32)>) -> i32 { get_visible_aseteroids(position, &all_asteroid_positions).into_iter().count() as i32 } fn get_visible_aseteroids (position: (i32, i32), all_asteroid_positions: &Vec<(i32, i32)>) -> HashSet<(i32, i32)> { let distances = all_asteroid_positions .into_iter() .filter(|p| **p != position) .map(|(x, y)| (*x - position.0, *y - position.1)) .map(|(x, y)| { if x != 0 && y != 0 { let gcd = gcd(x, y); (x / gcd, y / gcd) } else if x == 0 && y >= 1 { (0, 1) } else if x == 0 && y <= 1 { (0, -1) 
} else if y == 0 && x >= 1 { (1, 0) } else { (-1, 0) } }) .collect::<HashSet<(i32, i32)>>(); distances }
31.606742
115
0.467828
0ad406f3ff3e91022e637b719e02e0aaf603909e
1,317
use std::fmt; struct City { description: String, residents: u64, is_coastal: bool, } struct Point { x: f32, y: f32, z: f32, } impl fmt::Display for Point { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "(x: {}, y: {}, z: {})", self.x, self.y, self.z) } } fn new_point(x: f32, y: f32, z: f32) -> Point { Point { x, y, z } } fn new_city(residents: u64, is_coastal: bool) -> City { if is_coastal { City { description: format!("a *coastal* city of approximately {} residents", residents), residents, is_coastal, } } else { City { description: format!( "a *non-coastal* city of approximately {} residents", residents ), residents, is_coastal, } } } fn main() { let rustville: City = new_city(133742069, false); let point_a = new_point(12.1, 12.33, 0.2); println!("This city can be described as: {}", rustville.description); println!("This city has {} residents", rustville.residents); println!("Your point is {}", point_a); if rustville.is_coastal { println!("It is a coastal city."); } else { println!("It is not a coastal city."); } }
21.95
94
0.529992
c14482f2ec3eae64fd0d08687c2b00bb95ed5776
7,718
// // Copyright 2022 The Project Oak Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //! This module provides structs for representing a Rekor LogEntry, as well as logic for parsing and //! verifying signatures in a Rekor LogEntry. use anyhow::Context; use ecdsa::Signature; use serde::{Deserialize, Serialize}; use signature::Verifier; use std::str::FromStr; /// Struct representing a Rekor LogEntry. #[derive(Debug, Deserialize, PartialEq, Serialize)] pub struct LogEntry { #[serde(rename = "body")] pub body: String, #[serde(rename = "integratedTime")] pub integrated_time: usize, // TODO(#2316): should this be verified? /// This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was /// included in the log /// Pattern: ^[0-9a-fA-F]{64}$ #[serde(rename = "logID")] pub log_id: String, /// Minimum: 0 #[serde(rename = "logIndex")] pub log_index: u64, /// Includes a signature over the body, integratedTime, logID, and logIndex. #[serde(skip_serializing_if = "Option::is_none")] #[serde(rename = "verification")] pub verification: Option<LogEntryVerification>, } /// Struct representing a verification object in a Rekor LogEntry. The verification object in Rekor /// also contains an inclusion proof. Since we currently don't verify the inclusion proof in the /// client, it is omitted from this struct. 
#[derive(Debug, Deserialize, PartialEq, Serialize)] pub struct LogEntryVerification { // Base64-encoded signature over the body, integratedTime, logID, and logIndex. #[serde(rename = "signedEntryTimestamp")] pub signed_entry_timestamp: String, } /// Convenient struct for verifying the `signedEntryTimestamp` in a Rekor LogEntry. /// /// This bundle can be verified using the public key from Rekor. The public key can /// be obtained from the `/api/v1/log/publicKey` Rest API. For `sigstore.dev`, it is a PEM-encoded /// x509/PKIX public key. The PEM-encoded content can be parsed into a `p256::ecdsa::VerifyingKey` /// using `unmarshal_pem_to_p256_public_key`. pub struct RekorSignatureBundle { /// Canonicalized JSON representation, based on RFC 8785 rules, of a subset of a Rekor LogEntry /// fields that are signed to generate `signedEntryTimestamp` (also a field in the Rekor /// LogEntry). These fields include body, integratedTime, logID and logIndex. pub canonicalized: Vec<u8>, /// Signature over the canonicalized JSON document. This is obtained by decoding the /// Base64-encoded `signedEntryTimestamp` in the Rekor LogEntry. pub signature: p256::ecdsa::Signature, } /// Converter for creating a RekorSignatureBundle from a Rekor LogEntry as described in /// <https://github.com/sigstore/rekor/blob/4fcdcaa58fd5263560a82978d781eb64f5c5f93c/openapi.yaml#L433-L476>. impl TryFrom<&LogEntry> for RekorSignatureBundle { type Error = anyhow::Error; fn try_from(log_entry: &LogEntry) -> anyhow::Result<Self> { // Create a copy of the LogEntry, but skip the verification. let entry_subset = LogEntry { body: log_entry.body.clone(), integrated_time: log_entry.integrated_time, log_id: log_entry.log_id.clone(), log_index: log_entry.log_index, verification: None, }; // Canonicalized JSON document that is signed. Canonicalization should follow the RFC 8785 // rules. 
let canonicalized = serde_jcs::to_string(&entry_subset) .context("couldn't create canonicalized json string")?; let canonicalized = canonicalized.as_bytes().to_vec(); // Extract the signature from the LogEntry. let sig_base64 = log_entry .verification .as_ref() .ok_or(anyhow::anyhow!("no verification field in the log entry"))? .signed_entry_timestamp .clone(); let sig_bytes = sig_base64.as_str().as_bytes(); let sig = base64::decode(sig_bytes).context("couldn't decode Base64 signedEntryTimestamp")?; let signature = Signature::from_der(&sig).context("invalid ASN.1 signature")?; Ok(Self { canonicalized, signature, }) } } /// Verifies a Rekor LogEntry. /// /// The verification involves the following: /// /// 1. verifying the signature in `signedEntryTimestamp`, using Rekor's public key, /// 1. verifying the signature in `body.RekordObj.signature`, using Oak's public key, /// 1. verifying that the content of the body matches the input `endorsement_bytes`. /// /// Returns `Ok(())` if the verification succeeds, otherwise returns `Err()`. pub fn verify_rekor_log_entry( log_entry_bytes: &[u8], pem_encoded_public_key_bytes: &[u8], _oak_public_key_bytes: &[u8], _endorsement_bytes: &[u8], ) -> anyhow::Result<()> { verify_rekor_signature(log_entry_bytes, pem_encoded_public_key_bytes)?; let parsed: std::collections::HashMap<String, LogEntry> = serde_json::from_slice(log_entry_bytes) .context("couldn't parse bytes into a LogEntry object.")?; let _entry = parsed.values().next().context("no entry in the map")?; // TODO(#2316): entry.body is base64 encoded. It should be decoded to extract content and // signature from it. // TODO(#2316): verify signature in the body using oak's public key // TODO(#2316): check that the endorsement has the same hash as the one in the body. Ok(()) } /// Parses `log_entry_bytes` into a Rekor LogEntry, and verifies the signature in /// signedEntryTimestamp using the public key in `pem_encoded_public_key_bytes`. 
/// /// `log_entry_bytes`: LogEntry downloaded from Rekor as a byte array. /// `pem_encoded_public_key_bytes`: PEM-encoded public key of Rekor as a byte array. /// /// Returns `Ok(())` if the verification succeeds, otherwise returns `Err()`. pub fn verify_rekor_signature( log_entry_bytes: &[u8], pem_encoded_public_key_bytes: &[u8], ) -> anyhow::Result<()> { let signature_bundle = rekor_signature_bundle(log_entry_bytes)?; let key = unmarshal_pem_to_p256_public_key(pem_encoded_public_key_bytes)?; key.verify(&signature_bundle.canonicalized, &signature_bundle.signature) .context("failed to verify signedEntryTimestamp of the Rekor LogEntry") } /// Parses a PEM-encoded x509/PKIX public key into a `p256::ecdsa::VerifyingKey`. /// /// `pem_bytes`: A PEM-encoded public key as a byte array. pub fn unmarshal_pem_to_p256_public_key( pem_bytes: &[u8], ) -> anyhow::Result<p256::ecdsa::VerifyingKey> { let pem_str = std::str::from_utf8(pem_bytes).context("couldn't convert bytes to string")?; p256::ecdsa::VerifyingKey::from_str(pem_str) .context("couldn't parse pem as a p256::ecdsa::VerifyingKey") } fn rekor_signature_bundle(log_entry_bytes: &[u8]) -> anyhow::Result<RekorSignatureBundle> { let parsed: std::collections::HashMap<String, LogEntry> = serde_json::from_slice(log_entry_bytes) .context("couldn't parse bytes into a LogEntry object.")?; let entry = parsed.values().next().context("no entry in the map")?; RekorSignatureBundle::try_from(entry) }
41.272727
109
0.701477
ddf836965dff7cd1aee3f849461e1c1659f49053
6,427
use std::borrow::Cow; use chrono::Duration; use time::Tm; use super::{Cookie, SameSite}; /// Structure that follows the builder pattern for building `Cookie` structs. /// /// To construct a cookie: /// /// 1. Call [`Cookie::build`](struct.Cookie.html#method.build) to start building. /// 2. Use any of the builder methods to set fields in the cookie. /// 3. Call [finish](#method.finish) to retrieve the built cookie. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let cookie: Cookie = Cookie::build("name", "value") /// .domain("www.rust-lang.org") /// .path("/") /// .secure(true) /// .http_only(true) /// .max_age(84600) /// .finish(); /// ``` #[derive(Debug, Clone)] pub struct CookieBuilder { /// The cookie being built. cookie: Cookie<'static>, } impl CookieBuilder { /// Creates a new `CookieBuilder` instance from the given name and value. /// /// This method is typically called indirectly via /// [Cookie::build](struct.Cookie.html#method.build). /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar").finish(); /// assert_eq!(c.name_value(), ("foo", "bar")); /// ``` pub fn new<N, V>(name: N, value: V) -> CookieBuilder where N: Into<Cow<'static, str>>, V: Into<Cow<'static, str>>, { CookieBuilder { cookie: Cookie::new(name, value), } } /// Sets the `expires` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .expires(time::now()) /// .finish(); /// /// assert!(c.expires().is_some()); /// ``` #[inline] pub fn expires(mut self, when: Tm) -> CookieBuilder { self.cookie.set_expires(when); self } /// Sets the `max_age` field in seconds in the cookie being built. 
/// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .max_age(1800) /// .finish(); /// /// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60))); /// ``` #[inline] pub fn max_age(self, seconds: i64) -> CookieBuilder { self.max_age_time(Duration::seconds(seconds)) } /// Sets the `max_age` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .max_age_time(time::Duration::minutes(30)) /// .finish(); /// /// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60))); /// ``` #[inline] pub fn max_age_time(mut self, value: Duration) -> CookieBuilder { self.cookie.set_max_age(value); self } /// Sets the `domain` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .domain("www.rust-lang.org") /// .finish(); /// /// assert_eq!(c.domain(), Some("www.rust-lang.org")); /// ``` pub fn domain<D: Into<Cow<'static, str>>>(mut self, value: D) -> CookieBuilder { self.cookie.set_domain(value); self } /// Sets the `path` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .path("/") /// .finish(); /// /// assert_eq!(c.path(), Some("/")); /// ``` pub fn path<P: Into<Cow<'static, str>>>(mut self, path: P) -> CookieBuilder { self.cookie.set_path(path); self } /// Sets the `secure` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .secure(true) /// .finish(); /// /// assert_eq!(c.secure(), Some(true)); /// ``` #[inline] pub fn secure(mut self, value: bool) -> CookieBuilder { self.cookie.set_secure(value); self } /// Sets the `http_only` field in the cookie being built. 
/// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .http_only(true) /// .finish(); /// /// assert_eq!(c.http_only(), Some(true)); /// ``` #[inline] pub fn http_only(mut self, value: bool) -> CookieBuilder { self.cookie.set_http_only(value); self } /// Sets the `same_site` field in the cookie being built. /// /// # Example /// /// ```rust /// use actori_http::cookie::{Cookie, SameSite}; /// /// let c = Cookie::build("foo", "bar") /// .same_site(SameSite::Strict) /// .finish(); /// /// assert_eq!(c.same_site(), Some(SameSite::Strict)); /// ``` #[inline] pub fn same_site(mut self, value: SameSite) -> CookieBuilder { self.cookie.set_same_site(value); self } /// Makes the cookie being built 'permanent' by extending its expiration and /// max age 20 years into the future. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// use chrono::Duration; /// /// let c = Cookie::build("foo", "bar") /// .permanent() /// .finish(); /// /// assert_eq!(c.max_age(), Some(Duration::days(365 * 20))); /// # assert!(c.expires().is_some()); /// ``` #[inline] pub fn permanent(mut self) -> CookieBuilder { self.cookie.make_permanent(); self } /// Finishes building and returns the built `Cookie`. /// /// # Example /// /// ```rust /// use actori_http::cookie::Cookie; /// /// let c = Cookie::build("foo", "bar") /// .domain("crates.io") /// .path("/") /// .finish(); /// /// assert_eq!(c.name_value(), ("foo", "bar")); /// assert_eq!(c.domain(), Some("crates.io")); /// assert_eq!(c.path(), Some("/")); /// ``` #[inline] pub fn finish(self) -> Cookie<'static> { self.cookie } }
25.605578
84
0.508169
2f7f57397a699a0797cf7747298eecb7e1695e68
3,853
extern crate binance; use binance::api::*; use binance::general::*; use binance::account::*; use binance::market::*; fn main() { general(); account(); market_data(); } fn general() { let general: General = Binance::new(None, None); let ping = general.ping(); match ping { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } let result = general.get_server_time(); match result { Ok(answer) => println!("Server Time: {}", answer.server_time), Err(e) => println!("Error: {}", e), } } fn account() { let api_key = Some("YOUR_API_KEY".into()); let secret_key = Some("YOUR_SECRET_KEY".into()); let account: Account = Binance::new(api_key, secret_key); match account.get_account() { Ok(answer) => println!("{:?}", answer.balances), Err(e) => println!("Error: {}", e), } match account.get_open_orders("WTCETH") { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.limit_buy("WTCETH", 10, 0.014000) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.market_buy("WTCETH", 5) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.limit_sell("WTCETH", 10, 0.035000) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.market_sell("WTCETH", 5) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } let order_id = 1_957_528; match account.order_status("WTCETH", order_id) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.cancel_order("WTCETH", order_id) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.get_balance("KNC") { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } match account.trade_history("WTCETH") { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } } fn market_data() { let market: Market = Binance::new(None, None); // Order book 
match market.get_depth("BNBETH") { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } // Latest price for ALL symbols match market.get_all_prices() { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } // Latest price for ONE symbol match market.get_price("KNCETH") { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } // Best price/qty on the order book for ALL symbols match market.get_all_book_tickers() { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } // Best price/qty on the order book for ONE symbol match market.get_book_ticker("BNBETH") { Ok(answer) => println!( "Bid Price: {}, Ask Price: {}", answer.bid_price, answer.ask_price ), Err(e) => println!("Error: {}", e), } // 24hr ticker price change statistics match market.get_24h_price_stats("BNBETH") { Ok(answer) => println!( "Open Price: {}, Higher Price: {}, Lower Price: {:?}", answer.open_price, answer.high_price, answer.low_price ), Err(e) => println!("Error: {}", e), } // last 10 5min klines (candlesticks) for a symbol: match market.get_klines("BNBETH", "5m", 10, None, None) { Ok(answer) => println!("{:?}", answer), Err(e) => println!("Error: {}", e), } }
27.92029
70
0.521671
298761cd6ae510d5a70853021ce0d439f48a64b1
6,634
use crate::cshadow; use crate::host::context::{ThreadContext, ThreadContextObjs}; use crate::host::descriptor::{CompatDescriptor, DescriptorFlags, FileStatus, PosixFile}; use crate::host::syscall; use crate::host::syscall_types::SyscallResult; use crate::host::syscall_types::{SysCallArgs, SysCallReg}; use log::*; use nix::errno::Errno; use nix::fcntl::OFlag; use std::convert::{TryFrom, TryInto}; use std::os::unix::prelude::RawFd; fn fcntl(ctx: &mut ThreadContext, args: &SysCallArgs) -> SyscallResult { let fd: RawFd = args.args[0].into(); let cmd: i32 = args.args[1].into(); // get the descriptor, or return early if it doesn't exist let desc = match syscall::get_descriptor_mut(ctx.process, fd)? { CompatDescriptor::New(d) => d, // if it's a legacy descriptor, use the C syscall handler instead CompatDescriptor::Legacy(_) => { return unsafe { cshadow::syscallhandler_fcntl( ctx.thread.csyscallhandler(), args as *const cshadow::SysCallArgs, ) } .into() } }; Ok(match cmd { libc::F_GETFL => { let file = desc.get_file().borrow(); // combine the file status and access mode flags let flags = file.get_status().as_o_flags() | file.mode().as_o_flags(); SysCallReg::from(flags.bits()) } libc::F_SETFL => { let mut status = OFlag::from_bits(i32::from(args.args[2])).ok_or(Errno::EINVAL)?; // remove access mode flags status.remove(OFlag::O_RDONLY | OFlag::O_WRONLY | OFlag::O_RDWR | OFlag::O_PATH); // remove file creation flags status.remove( OFlag::O_CLOEXEC | OFlag::O_CREAT | OFlag::O_DIRECTORY | OFlag::O_EXCL | OFlag::O_NOCTTY | OFlag::O_NOFOLLOW | OFlag::O_TMPFILE | OFlag::O_TRUNC, ); let mut file = desc.get_file().borrow_mut(); let old_flags = file.get_status().as_o_flags(); // fcntl(2): "On Linux, this command can change only the O_APPEND, O_ASYNC, O_DIRECT, // O_NOATIME, and O_NONBLOCK flags" let update_mask = OFlag::O_APPEND | OFlag::O_ASYNC | OFlag::O_DIRECT | OFlag::O_NOATIME | OFlag::O_NONBLOCK; // The proper way for the process to update its flags is to: // int flags = 
fcntl(fd, F_GETFL); // flags = flags | O_NONBLOCK; // add O_NONBLOCK // fcntl(fd, F_SETFL, flags); // So if there are flags that we can't update, we should assume they are leftover // from the F_GETFL and we shouldn't return an error. This includes `O_DSYNC` and // `O_SYNC`, which fcntl(2) says: // "It is not possible to use F_SETFL to change the state of the O_DSYNC and O_SYNC // flags. Attempts to change the state of these flags are silently ignored." // In other words, the following code should always be valid: // int flags = fcntl(fd, F_GETFL); // fcntl(fd, F_SETFL, flags); // set to the current existing flags // keep the old flags that we can't change, and use the new flags that we can change let status = (old_flags & !update_mask) | (status & update_mask); let (status, remaining) = FileStatus::from_o_flags(status); // check if there are flags that we don't support but Linux does if !remaining.is_empty() { return Err(Errno::EINVAL.into()); } file.set_status(status); SysCallReg::from(0) } libc::F_GETFD => { let flags = desc.get_flags().bits(); // the only descriptor flag supported by Linux is FD_CLOEXEC, so let's make sure // we're returning the correct value debug_assert!(flags == 0 || flags == libc::FD_CLOEXEC); SysCallReg::from(flags) } libc::F_SETFD => { let flags = DescriptorFlags::from_bits(i32::from(args.args[2])).ok_or(Errno::EINVAL)?; desc.set_flags(flags); SysCallReg::from(0) } libc::F_DUPFD => { let min_fd: i32 = args.args[2].into(); let min_fd: u32 = min_fd.try_into().map_err(|_| nix::errno::Errno::EINVAL)?; let new_desc = CompatDescriptor::New(desc.dup(DescriptorFlags::empty())); let new_fd = ctx .process .register_descriptor_with_min_fd(new_desc, min_fd); SysCallReg::from(i32::try_from(new_fd).unwrap()) } libc::F_DUPFD_CLOEXEC => { let min_fd: i32 = args.args[2].into(); let min_fd: u32 = min_fd.try_into().map_err(|_| nix::errno::Errno::EINVAL)?; let new_desc = CompatDescriptor::New(desc.dup(DescriptorFlags::CLOEXEC)); let new_fd = ctx .process 
.register_descriptor_with_min_fd(new_desc, min_fd); SysCallReg::from(i32::try_from(new_fd).unwrap()) } libc::F_GETPIPE_SZ => { #[allow(irrefutable_let_patterns)] if let PosixFile::Pipe(pipe) = desc.get_file() { SysCallReg::from(i32::try_from(pipe.borrow().max_size()).unwrap()) } else { return Err(Errno::EINVAL.into()); } } _ => return Err(Errno::EINVAL.into()), }) } mod export { use super::*; use crate::utility::notnull::notnull_mut_debug; #[no_mangle] pub extern "C" fn rustsyscallhandler_fcntl( sys: *mut cshadow::SysCallHandler, args: *const cshadow::SysCallArgs, ) -> cshadow::SysCallReturn { let mut objs = unsafe { ThreadContextObjs::from_syscallhandler(notnull_mut_debug(sys)) }; fcntl(&mut objs.borrow(), unsafe { args.as_ref().unwrap() }).into() } #[no_mangle] pub extern "C" fn rustsyscallhandler_fcntl64( sys: *mut cshadow::SysCallHandler, args: *const cshadow::SysCallArgs, ) -> cshadow::SysCallReturn { // Our fcntl supports the flock64 struct when any of the F_GETLK64, F_SETLK64, and F_SETLKW64 // commands are specified, so we can just use our fcntl handler directly. trace!("fcntl64 called, forwarding to fcntl handler"); rustsyscallhandler_fcntl(sys, args) } }
41.204969
101
0.569189
0ad3032aac681ea1e4627072944988fe9c32440f
54,235
// Copyright 2019 WHTCORPS INC Project Authors. Licensed under Apache-2.0. use smallvec::SmallVec; use std::collections::HashSet; use std::sync::Arc; use ekvproto::interlock::KeyCone; use milevadb_query_datatype::{EvalType, FieldTypeAccessor}; use violetabftstore::interlock::::collections::HashMap; use fidel_timeshare::PrimaryCausetInfo; use fidel_timeshare::FieldType; use fidel_timeshare::BlockScan; use super::util::scan_executor::*; use crate::interface::*; use milevadb_query_common::causet_storage::{IntervalCone, causet_storage}; use milevadb_query_common::Result; use milevadb_query_datatype::codec::batch::{LazyBatchPrimaryCauset, LazyBatchPrimaryCausetVec}; use milevadb_query_datatype::codec::Evcausetidx; use milevadb_query_datatype::expr::{EvalConfig, EvalContext}; pub struct BatchBlockScanFreeDaemon<S: causet_storage>(ScanFreeDaemon<S, BlockScanFreeDaemonImpl>); type HandleIndicesVec = SmallVec<[usize; 2]>; // We assign a dummy type `Box<dyn causet_storage<Statistics = ()>>` so that we can omit the type // when calling `check_supported`. impl BatchBlockScanFreeDaemon<Box<dyn causet_storage<Statistics = ()>>> { /// Checks whether this executor can be used. 
#[inline] pub fn check_supported(descriptor: &BlockScan) -> Result<()> { check_PrimaryCausets_info_supported(descriptor.get_PrimaryCausets()) } } impl<S: causet_storage> BatchBlockScanFreeDaemon<S> { pub fn new( causet_storage: S, config: Arc<EvalConfig>, PrimaryCausets_info: Vec<PrimaryCausetInfo>, key_cones: Vec<KeyCone>, primary_PrimaryCauset_ids: Vec<i64>, is_backward: bool, is_scanned_cone_aware: bool, ) -> Result<Self> { let is_PrimaryCauset_filled = vec![false; PrimaryCausets_info.len()]; let mut is_key_only = true; let mut handle_indices = HandleIndicesVec::new(); let mut schemaReplicant = Vec::with_capacity(PrimaryCausets_info.len()); let mut PrimaryCausets_default_value = Vec::with_capacity(PrimaryCausets_info.len()); let mut PrimaryCauset_id_index = HashMap::default(); let primary_PrimaryCauset_ids_set = primary_PrimaryCauset_ids.iter().collect::<HashSet<_>>(); for (index, mut ci) in PrimaryCausets_info.into_iter().enumerate() { // For each PrimaryCauset info, we need to extract the following info: // - Corresponding field type (push into `schemaReplicant`). schemaReplicant.push(field_type_from_PrimaryCauset_info(&ci)); // - Prepare PrimaryCauset default value (will be used to fill missing PrimaryCauset later). PrimaryCausets_default_value.push(ci.take_default_val()); // - CausetStore the index of the PK handles. // - Check whether or not we don't need KV values (iff PK handle is given). if ci.get_pk_handle() { handle_indices.push(index); } else { if !primary_PrimaryCauset_ids_set.contains(&ci.get_PrimaryCauset_id()) { is_key_only = false; } PrimaryCauset_id_index.insert(ci.get_PrimaryCauset_id(), index); } // Note: if two PK handles are given, we will only preserve the *last* one. Also if two // PrimaryCausets with the same PrimaryCauset id are given, we will only preserve the *last* one. 
} let no_common_handle = primary_PrimaryCauset_ids.is_empty(); let imp = BlockScanFreeDaemonImpl { context: EvalContext::new(config), schemaReplicant, PrimaryCausets_default_value, PrimaryCauset_id_index, handle_indices, primary_PrimaryCauset_ids, is_PrimaryCauset_filled, }; let wrapper = ScanFreeDaemon::new(ScanFreeDaemonOptions { imp, causet_storage, key_cones, is_backward, is_key_only, accept_point_cone: no_common_handle, is_scanned_cone_aware, })?; Ok(Self(wrapper)) } } impl<S: causet_storage> BatchFreeDaemon for BatchBlockScanFreeDaemon<S> { type StorageStats = S::Statistics; #[inline] fn schemaReplicant(&self) -> &[FieldType] { self.0.schemaReplicant() } #[inline] fn next_batch(&mut self, scan_rows: usize) -> BatchExecuteResult { self.0.next_batch(scan_rows) } #[inline] fn collect_exec_stats(&mut self, dest: &mut ExecuteStats) { self.0.collect_exec_stats(dest); } #[inline] fn collect_causet_storage_stats(&mut self, dest: &mut Self::StorageStats) { self.0.collect_causet_storage_stats(dest); } #[inline] fn take_scanned_cone(&mut self) -> IntervalCone { self.0.take_scanned_cone() } #[inline] fn can_be_cached(&self) -> bool { self.0.can_be_cached() } } struct BlockScanFreeDaemonImpl { /// Note: Although called `EvalContext`, it is some kind of execution context instead. // TODO: Rename EvalContext to ExecContext. context: EvalContext, /// The schemaReplicant of the output. All of the output come from specific PrimaryCausets in the underlying /// causet_storage. schemaReplicant: Vec<FieldType>, /// The default value of corresponding PrimaryCausets in the schemaReplicant. When PrimaryCauset data is missing, /// the default value will be used to fill the output. PrimaryCausets_default_value: Vec<Vec<u8>>, /// The output position in the schemaReplicant giving the PrimaryCauset id. PrimaryCauset_id_index: HashMap<i64, usize>, /// Vec of indices in output Evcausetidx to put the handle. The indices must be sorted in the vec. 
handle_indices: HandleIndicesVec, /// Vec of Primary key PrimaryCauset's IDs. primary_PrimaryCauset_ids: Vec<i64>, /// A vector of flags indicating whether corresponding PrimaryCauset is filled in `next_batch`. /// It is a struct level field in order to prevent repeated memory allocations since its length /// is fixed for each `next_batch` call. is_PrimaryCauset_filled: Vec<bool>, } impl BlockScanFreeDaemonImpl { fn process_v1( &mut self, key: &[u8], value: &[u8], PrimaryCausets: &mut LazyBatchPrimaryCausetVec, decoded_PrimaryCausets: &mut usize, ) -> Result<()> { use codec::prelude::NumberDecoder; use milevadb_query_datatype::codec::datum; // The layout of value is: [col_id_1, value_1, col_id_2, value_2, ...] // where each element is datum encoded. // The PrimaryCauset id datum must be in var i64 type. let PrimaryCausets_len = PrimaryCausets.PrimaryCausets_len(); let mut remaining = value; while !remaining.is_empty() && *decoded_PrimaryCausets < PrimaryCausets_len { if remaining[0] != datum::VAR_INT_FLAG { return Err(other_err!( "Unable to decode Evcausetidx: PrimaryCauset id must be VAR_INT" )); } remaining = &remaining[1..]; let PrimaryCauset_id = box_try!(remaining.read_var_i64()); let (val, new_remaining) = datum::split_datum(remaining, false)?; // Note: The produced PrimaryCausets may be not in the same length if there is error due // to corrupted data. It will be handled in `ScanFreeDaemon`. let some_index = self.PrimaryCauset_id_index.get(&PrimaryCauset_id); if let Some(index) = some_index { let index = *index; if !self.is_PrimaryCauset_filled[index] { PrimaryCausets[index].mut_raw().push(val); *decoded_PrimaryCausets += 1; self.is_PrimaryCauset_filled[index] = true; } else { // This indicates that there are duplicated elements in the Evcausetidx, which is // unexpected. We won't abort the request or overwrite the previous element, // but will output a log anyway. 
warn!( "Ignored duplicated Evcausetidx datum in Block scan"; "key" => hex::encode_upper(&key), "value" => hex::encode_upper(&value), "dup_PrimaryCauset_id" => PrimaryCauset_id, ); } } remaining = new_remaining; } Ok(()) } fn process_v2( &mut self, value: &[u8], PrimaryCausets: &mut LazyBatchPrimaryCausetVec, decoded_PrimaryCausets: &mut usize, ) -> Result<()> { use milevadb_query_datatype::codec::datum; use milevadb_query_datatype::codec::Evcausetidx::v2::{EventSlice, V1CompatibleEncoder}; let Evcausetidx = EventSlice::from_bytes(value)?; for (col_id, idx) in &self.PrimaryCauset_id_index { if let Some((spacelike, offset)) = Evcausetidx.search_in_non_null_ids(*col_id)? { let mut buffer_to_write = PrimaryCausets[*idx].mut_raw().begin_concat_extlightlike(); buffer_to_write .write_v2_as_datum(&Evcausetidx.values()[spacelike..offset], &self.schemaReplicant[*idx])?; *decoded_PrimaryCausets += 1; self.is_PrimaryCauset_filled[*idx] = true; } else if Evcausetidx.search_in_null_ids(*col_id) { PrimaryCausets[*idx].mut_raw().push(datum::DATUM_DATA_NULL); *decoded_PrimaryCausets += 1; self.is_PrimaryCauset_filled[*idx] = true; } else { // This PrimaryCauset is missing. It will be filled with default values later. } } Ok(()) } } impl ScanFreeDaemonImpl for BlockScanFreeDaemonImpl { #[inline] fn schemaReplicant(&self) -> &[FieldType] { &self.schemaReplicant } #[inline] fn mut_context(&mut self) -> &mut EvalContext { &mut self.context } /// Constructs empty PrimaryCausets, with PK in decoded format and the rest in raw format. fn build_PrimaryCauset_vec(&self, scan_rows: usize) -> LazyBatchPrimaryCausetVec { let PrimaryCausets_len = self.schemaReplicant.len(); let mut PrimaryCausets = Vec::with_capacity(PrimaryCausets_len); // If there are any PK PrimaryCausets, for each of them, fill non-PK PrimaryCausets before it and push the // PK PrimaryCauset. 
// For example, consider: // non-pk non-pk non-pk pk non-pk non-pk pk pk non-pk non-pk // handle_indices: ^3 ^6 ^7 // Each turn of the following loop will push this to `PrimaryCausets`: // 1st turn: [non-pk, non-pk, non-pk, pk] // 2nd turn: [non-pk, non-pk, pk] // 3rd turn: [pk] let mut last_index = 0usize; for handle_index in &self.handle_indices { // `handle_indices` is expected to be sorted. assert!(*handle_index >= last_index); // Fill last `handle_index - 1` PrimaryCausets. for _ in last_index..*handle_index { PrimaryCausets.push(LazyBatchPrimaryCauset::raw_with_capacity(scan_rows)); } // For PK handles, we construct a decoded `VectorValue` because it is directly // stored as i64, without a datum flag, at the lightlike of key. PrimaryCausets.push(LazyBatchPrimaryCauset::decoded_with_capacity_and_tp( scan_rows, EvalType::Int, )); last_index = *handle_index + 1; } // Then fill remaining PrimaryCausets after the last handle PrimaryCauset. If there are no PK PrimaryCausets, // the previous loop will be skipped and this loop will be run on 0..PrimaryCausets_len. 
// For the example above, this loop will push: [non-pk, non-pk] for _ in last_index..PrimaryCausets_len { PrimaryCausets.push(LazyBatchPrimaryCauset::raw_with_capacity(scan_rows)); } assert_eq!(PrimaryCausets.len(), PrimaryCausets_len); LazyBatchPrimaryCausetVec::from(PrimaryCausets) } fn process_kv_pair( &mut self, key: &[u8], value: &[u8], PrimaryCausets: &mut LazyBatchPrimaryCausetVec, ) -> Result<()> { use milevadb_query_datatype::codec::{datum, Block}; let PrimaryCausets_len = self.schemaReplicant.len(); let mut decoded_PrimaryCausets = 0; if value.is_empty() || (value.len() == 1 && value[0] == datum::NIL_FLAG) { // Do nothing } else { match value[0] { Evcausetidx::v2::CODEC_VERSION => self.process_v2(value, PrimaryCausets, &mut decoded_PrimaryCausets)?, _ => self.process_v1(key, value, PrimaryCausets, &mut decoded_PrimaryCausets)?, } } if !self.handle_indices.is_empty() { // In this case, An int handle is expected. let handle = Block::decode_int_handle(key)?; for handle_index in &self.handle_indices { // TODO: We should avoid calling `push_int` repeatedly. Instead we should specialize // a `&mut Vec` first. However it is hard to program due to lifetime restriction. if !self.is_PrimaryCauset_filled[*handle_index] { PrimaryCausets[*handle_index].mut_decoded().push_int(Some(handle)); decoded_PrimaryCausets += 1; self.is_PrimaryCauset_filled[*handle_index] = true; } } } else if !self.primary_PrimaryCauset_ids.is_empty() { // Otherwise, if `primary_PrimaryCauset_ids` is not empty, we try to extract the values of the PrimaryCausets from the common handle. let mut handle = Block::decode_common_handle(key)?; for primary_id in self.primary_PrimaryCauset_ids.iter() { let index = self.PrimaryCauset_id_index.get(primary_id); let (datum, remain) = datum::split_datum(handle, false)?; handle = remain; // If the PrimaryCauset info of the coresponding primary PrimaryCauset id is missing, we ignore this slice of the datum. 
if let Some(&index) = index { if !self.is_PrimaryCauset_filled[index] { PrimaryCausets[index].mut_raw().push(datum); decoded_PrimaryCausets += 1; self.is_PrimaryCauset_filled[index] = true; } } } } else { Block::check_record_key(key)?; } // Some fields may be missing in the Evcausetidx, we push corresponding default value to make all // PrimaryCausets in same length. for i in 0..PrimaryCausets_len { if !self.is_PrimaryCauset_filled[i] { // Missing fields must not be a primary key, so it must be // `LazyBatchPrimaryCauset::raw`. let default_value = if !self.PrimaryCausets_default_value[i].is_empty() { // default value is provided, use the default value self.PrimaryCausets_default_value[i].as_slice() } else if !self.schemaReplicant[i] .as_accessor() .flag() .contains(milevadb_query_datatype::FieldTypeFlag::NOT_NULL) { // NULL is allowed, use NULL datum::DATUM_DATA_NULL } else { return Err(other_err!( "Data is corrupted, missing data for NOT NULL PrimaryCauset (offset = {})", i )); }; PrimaryCausets[i].mut_raw().push(default_value); } else { // Reset to not-filled, prepare for next function call. self.is_PrimaryCauset_filled[i] = false; } } Ok(()) } } #[causet(test)] mod tests { use super::*; use std::iter; use std::sync::Arc; use ekvproto::interlock::KeyCone; use milevadb_query_datatype::{EvalType, FieldTypeAccessor, FieldTypeTp}; use fidel_timeshare::PrimaryCausetInfo; use fidel_timeshare::FieldType; use milevadb_query_common::execute_stats::*; use milevadb_query_common::causet_storage::test_fixture::FixtureStorage; use milevadb_query_common::util::convert_to_prefix_next; use milevadb_query_datatype::codec::batch::LazyBatchPrimaryCausetVec; use milevadb_query_datatype::codec::data_type::*; use milevadb_query_datatype::codec::{datum, Block, Datum}; use milevadb_query_datatype::expr::EvalConfig; /// Test Helper for normal test with fixed schemaReplicant and data. 
/// Block SchemaReplicant: ID (INT, PK), Foo (INT), Bar (FLOAT, Default 4.5) /// PrimaryCauset id: 1, 2, 4 /// PrimaryCauset offset: 0, 1, 2 /// Block Data: 1, 10, 5.2 /// 3, -5, NULL /// 4, NULL, 4.5 (DEFAULT) /// 5, NULL, 0.1 /// 6, NULL, 4.5 (DEFAULT) struct BlockScanTestHelper { // ID(INT,PK), Foo(INT), Bar(Float,Default 4.5) pub data: Vec<(i64, Option<i64>, Option<Real>)>, pub Block_id: i64, pub PrimaryCausets_info: Vec<PrimaryCausetInfo>, pub field_types: Vec<FieldType>, pub store: FixtureStorage, } impl BlockScanTestHelper { /// create the BlockScanTestHelper with fixed schemaReplicant and data. fn new() -> BlockScanTestHelper { const Block_ID: i64 = 7; // [(row_id, PrimaryCausets)] where each PrimaryCauset: (PrimaryCauset id, datum) let data = vec![ ( 1, vec![ // A full Evcausetidx. (2, Datum::I64(10)), (4, Datum::F64(5.2)), ], ), ( 3, vec![ (4, Datum::Null), // Bar PrimaryCauset is null, even if default value is provided the final result // should be null. (2, Datum::I64(-5)), // Orders should not matter. ], ), ( 4, vec![ (2, Datum::Null), // Bar PrimaryCauset is missing, default value should be used. ], ), ( 5, vec![ // Foo PrimaryCauset is missing, NULL should be used. (4, Datum::F64(0.1)), ], ), ( 6, vec![ // Empty Evcausetidx ], ), ]; let expect_rows = vec![ (1, Some(10), Real::new(5.2).ok()), (3, Some(-5), None), (4, None, Real::new(4.5).ok()), (5, None, Real::new(0.1).ok()), (6, None, Real::new(4.5).ok()), ]; let mut ctx = EvalContext::default(); // The PrimaryCauset info for each PrimaryCauset in `data`. 
let PrimaryCausets_info = vec![ { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_pk_handle(true); ci.set_PrimaryCauset_id(1); ci }, { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_PrimaryCauset_id(2); ci }, { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::Double); ci.set_PrimaryCauset_id(4); ci.set_default_val(datum::encode_value(&mut ctx, &[Datum::F64(4.5)]).unwrap()); ci }, ]; let field_types = vec![ FieldTypeTp::LongLong.into(), FieldTypeTp::LongLong.into(), FieldTypeTp::Double.into(), ]; let store = { let kv: Vec<_> = data .iter() .map(|(row_id, PrimaryCausets)| { let key = Block::encode_row_key(Block_ID, *row_id); let value = { let Evcausetidx = PrimaryCausets.iter().map(|(_, datum)| datum.clone()).collect(); let col_ids: Vec<_> = PrimaryCausets.iter().map(|(id, _)| *id).collect(); Block::encode_row(&mut ctx, Evcausetidx, &col_ids).unwrap() }; (key, value) }) .collect(); FixtureStorage::from(kv) }; BlockScanTestHelper { data: expect_rows, Block_id: Block_ID, PrimaryCausets_info, field_types, store, } } /// The point cone representation for each Evcausetidx in `data`. fn point_cones(&self) -> Vec<KeyCone> { self.data .iter() .map(|(row_id, _, _)| { let mut r = KeyCone::default(); r.set_spacelike(Block::encode_row_key(self.Block_id, *row_id)); r.set_lightlike(r.get_spacelike().to_vec()); convert_to_prefix_next(r.mut_lightlike()); r }) .collect() } /// Returns whole Block's cones which include point cone and non-point cone. 
fn mixed_cones_for_whole_Block(&self) -> Vec<KeyCone> { vec![ self.Block_cone(std::i64::MIN, 3), { let mut r = KeyCone::default(); r.set_spacelike(Block::encode_row_key(self.Block_id, 3)); r.set_lightlike(r.get_spacelike().to_vec()); convert_to_prefix_next(r.mut_lightlike()); r }, self.Block_cone(4, std::i64::MAX), ] } fn store(&self) -> FixtureStorage { self.store.clone() } /// index of pk in self.PrimaryCausets_info. fn idx_pk(&self) -> usize { 0 } fn PrimaryCausets_info_by_idx(&self, col_index: &[usize]) -> Vec<PrimaryCausetInfo> { col_index .iter() .map(|id| self.PrimaryCausets_info[*id].clone()) .collect() } /// Get PrimaryCauset's field type by the index in self.PrimaryCausets_info. fn get_field_type(&self, col_idx: usize) -> &FieldType { &self.field_types[col_idx] } /// Returns the cone for handle in [spacelike_id,lightlike_id) fn Block_cone(&self, spacelike_id: i64, lightlike_id: i64) -> KeyCone { let mut cone = KeyCone::default(); cone.set_spacelike(Block::encode_row_key(self.Block_id, spacelike_id)); cone.set_lightlike(Block::encode_row_key(self.Block_id, lightlike_id)); cone } /// Returns the cone for the whole Block. fn whole_Block_cone(&self) -> KeyCone { self.Block_cone(std::i64::MIN, std::i64::MAX) } /// Returns the values spacelike from `spacelike_row` limit `events`. fn get_expect_values_by_cone(&self, spacelike_row: usize, events: usize) -> Vec<VectorValue> { let mut pks = VectorValue::with_capacity(self.data.len(), EvalType::Int); let mut foos = VectorValue::with_capacity(self.data.len(), EvalType::Int); let mut bars = VectorValue::with_capacity(self.data.len(), EvalType::Real); assert!(spacelike_row + events <= self.data.len()); for id in spacelike_row..spacelike_row + events { let (handle, foo, bar) = self.data[id]; pks.push_int(Some(handle)); foos.push_int(foo); bars.push_real(bar); } vec![pks, foos, bars] } /// check whether the data of PrimaryCausets in `col_idxs` are as expected. 
/// col_idxs: the idx of PrimaryCauset which the `PrimaryCausets` included. fn expect_Block_values( &self, col_idxs: &[usize], spacelike_row: usize, expect_rows: usize, mut PrimaryCausets: LazyBatchPrimaryCausetVec, ) { let values = self.get_expect_values_by_cone(spacelike_row, expect_rows); assert_eq!(PrimaryCausets.PrimaryCausets_len(), col_idxs.len()); assert_eq!(PrimaryCausets.rows_len(), expect_rows); for id in 0..col_idxs.len() { let col_idx = col_idxs[id]; if col_idx == self.idx_pk() { assert!(PrimaryCausets[id].is_decoded()); } else { assert!(PrimaryCausets[id].is_raw()); PrimaryCausets[id] .ensure_all_decoded_for_test( &mut EvalContext::default(), self.get_field_type(col_idx), ) .unwrap(); } assert_eq!(PrimaryCausets[id].decoded(), &values[col_idx]); } } } /// test basic `Blockscan` with cones, /// `col_idxs`: idxs of PrimaryCausets used in scan. /// `batch_expect_rows`: `expect_rows` used in `next_batch`. fn test_basic_scan( helper: &BlockScanTestHelper, cones: Vec<KeyCone>, col_idxs: &[usize], batch_expect_rows: &[usize], ) { let PrimaryCausets_info = helper.PrimaryCausets_info_by_idx(col_idxs); let mut executor = BatchBlockScanFreeDaemon::new( helper.store(), Arc::new(EvalConfig::default()), PrimaryCausets_info, cones, vec![], false, false, ) .unwrap(); let total_rows = helper.data.len(); let mut spacelike_row = 0; for expect_rows in batch_expect_rows { let expect_rows = *expect_rows; let expect_drained = spacelike_row + expect_rows > total_rows; let result = executor.next_batch(expect_rows); assert_eq!(*result.is_drained.as_ref().unwrap(), expect_drained); if expect_drained { // all remaining events are fetched helper.expect_Block_values( col_idxs, spacelike_row, total_rows - spacelike_row, result.physical_PrimaryCausets, ); return; } // we should get expect_rows in this case. 
helper.expect_Block_values(col_idxs, spacelike_row, expect_rows, result.physical_PrimaryCausets); spacelike_row += expect_rows; } } #[test] fn test_basic() { let helper = BlockScanTestHelper::new(); // cones to scan in each test case let test_cones = vec![ helper.point_cones(), // point scan vec![helper.whole_Block_cone()], // cone scan helper.mixed_cones_for_whole_Block(), // mixed cone scan and point scan ]; // cols to scan in each test case. let test_cols = vec![ // scan single PrimaryCauset vec![0], vec![1], vec![2], // scan multiple PrimaryCausets vec![0, 1], vec![0, 2], vec![1, 2], //PK is the last PrimaryCauset in schemaReplicant vec![2, 1, 0], //PK is the first PrimaryCauset in schemaReplicant vec![0, 1, 2], // PK is in the middle of the schemaReplicant vec![1, 0, 2], ]; // expect_rows used in next_batch for each test case. let test_batch_rows = vec![ // Fetched multiple times but totally it fetched exactly the same number of events // (so that it will be drained next time and at that time no Evcausetidx will be get). vec![1, 1, 1, 1, 1, 1], vec![1, 2, 2, 2], // Fetch a lot of events once. 
vec![10, 10], ]; for cones in test_cones { for cols in &test_cols { for batch_expect_rows in &test_batch_rows { test_basic_scan(&helper, cones.clone(), cols, batch_expect_rows); } } } } #[test] fn test_execution_summary() { let helper = BlockScanTestHelper::new(); let mut executor = BatchBlockScanFreeDaemon::new( helper.store(), Arc::new(EvalConfig::default()), helper.PrimaryCausets_info_by_idx(&[0]), vec![helper.whole_Block_cone()], vec![], false, false, ) .unwrap() .collect_summary(1); executor.next_batch(1); executor.next_batch(2); let mut s = ExecuteStats::new(2); executor.collect_exec_stats(&mut s); assert_eq!(s.scanned_rows_per_cone.len(), 1); assert_eq!(s.scanned_rows_per_cone[0], 3); // 0 remains Default because our output index is 1 assert_eq!(s.summary_per_executor[0], ExecSummary::default()); let exec_summary = s.summary_per_executor[1]; assert_eq!(3, exec_summary.num_produced_rows); assert_eq!(2, exec_summary.num_iterations); executor.collect_exec_stats(&mut s); // Collected statistics remain unchanged because of no newly generated delta statistics. assert_eq!(s.scanned_rows_per_cone.len(), 2); assert_eq!(s.scanned_rows_per_cone[0], 3); assert_eq!(s.scanned_rows_per_cone[1], 0); assert_eq!(s.summary_per_executor[0], ExecSummary::default()); let exec_summary = s.summary_per_executor[1]; assert_eq!(3, exec_summary.num_produced_rows); assert_eq!(2, exec_summary.num_iterations); // Reset collected statistics so that now we will only collect statistics in this round. 
s.clear(); executor.next_batch(10); executor.collect_exec_stats(&mut s); assert_eq!(s.scanned_rows_per_cone.len(), 1); assert_eq!(s.scanned_rows_per_cone[0], 2); assert_eq!(s.summary_per_executor[0], ExecSummary::default()); let exec_summary = s.summary_per_executor[1]; assert_eq!(2, exec_summary.num_produced_rows); assert_eq!(1, exec_summary.num_iterations); } #[test] fn test_corrupted_data() { const Block_ID: i64 = 5; let PrimaryCausets_info = vec![ { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_pk_handle(true); ci.set_PrimaryCauset_id(1); ci }, { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_PrimaryCauset_id(2); ci }, { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_PrimaryCauset_id(3); ci }, ]; let schemaReplicant = vec![ FieldTypeTp::LongLong.into(), FieldTypeTp::LongLong.into(), FieldTypeTp::LongLong.into(), ]; let mut ctx = EvalContext::default(); let mut kv = vec![]; { // Evcausetidx 0, which is not corrupted let key = Block::encode_row_key(Block_ID, 0); let value = Block::encode_row(&mut ctx, vec![Datum::I64(5), Datum::I64(7)], &[2, 3]).unwrap(); kv.push((key, value)); } { // Evcausetidx 1, which is not corrupted let key = Block::encode_row_key(Block_ID, 1); let value = vec![]; kv.push((key, value)); } { // Evcausetidx 2, which is partially corrupted let key = Block::encode_row_key(Block_ID, 2); let mut value = Block::encode_row(&mut ctx, vec![Datum::I64(5), Datum::I64(7)], &[2, 3]).unwrap(); // resize the value to make it partially corrupted value.truncate(value.len() - 3); kv.push((key, value)); } { // Evcausetidx 3, which is totally corrupted due to invalid datum flag for PrimaryCauset id let key = Block::encode_row_key(Block_ID, 3); // this datum flag does not exist let value = vec![255]; kv.push((key, value)); } { // Evcausetidx 4, which is totally corrupted due to missing datum for 
PrimaryCauset value let key = Block::encode_row_key(Block_ID, 4); let value = datum::encode_value(&mut ctx, &[Datum::I64(2)]).unwrap(); // col_id = 2 kv.push((key, value)); } let key_cone_point: Vec<_> = kv .iter() .enumerate() .map(|(index, _)| { let mut r = KeyCone::default(); r.set_spacelike(Block::encode_row_key(Block_ID, index as i64)); r.set_lightlike(r.get_spacelike().to_vec()); convert_to_prefix_next(r.mut_lightlike()); r }) .collect(); let store = FixtureStorage::from(kv); // For Evcausetidx 0 + Evcausetidx 1 + (Evcausetidx 2 ~ Evcausetidx 4), we should only get Evcausetidx 0, Evcausetidx 1 and an error. for corrupted_row_index in 2..=4 { let mut executor = BatchBlockScanFreeDaemon::new( store.clone(), Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![ key_cone_point[0].clone(), key_cone_point[1].clone(), key_cone_point[corrupted_row_index].clone(), ], vec![], false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 3); assert_eq!(result.physical_PrimaryCausets.rows_len(), 2); assert!(result.physical_PrimaryCausets[0].is_decoded()); assert_eq!( result.physical_PrimaryCausets[0].decoded().to_int_vec(), &[Some(0), Some(1)] ); assert!(result.physical_PrimaryCausets[1].is_raw()); result.physical_PrimaryCausets[1] .ensure_all_decoded_for_test(&mut ctx, &schemaReplicant[1]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[1].decoded().to_int_vec(), &[Some(5), None] ); assert!(result.physical_PrimaryCausets[2].is_raw()); result.physical_PrimaryCausets[2] .ensure_all_decoded_for_test(&mut ctx, &schemaReplicant[2]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[2].decoded().to_int_vec(), &[Some(7), None] ); } } #[test] fn test_locked_data() { const Block_ID: i64 = 42; let PrimaryCausets_info = vec![ { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_pk_handle(true); 
ci.set_PrimaryCauset_id(1); ci }, { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_PrimaryCauset_id(2); ci }, ]; let schemaReplicant = vec![FieldTypeTp::LongLong.into(), FieldTypeTp::LongLong.into()]; let mut ctx = EvalContext::default(); let mut kv = vec![]; { // Evcausetidx 0: ok let key = Block::encode_row_key(Block_ID, 0); let value = Block::encode_row(&mut ctx, vec![Datum::I64(7)], &[2]).unwrap(); kv.push((key, Ok(value))); } { // Evcausetidx 1: causet_storage error let key = Block::encode_row_key(Block_ID, 1); let value: std::result::Result< _, Box<dyn lightlike + Sync + Fn() -> milevadb_query_common::error::StorageError>, > = Err(Box::new(|| failure::format_err!("locked").into())); kv.push((key, value)); } { // Evcausetidx 2: not locked let key = Block::encode_row_key(Block_ID, 2); let value = Block::encode_row(&mut ctx, vec![Datum::I64(5)], &[2]).unwrap(); kv.push((key, Ok(value))); } let key_cone_point: Vec<_> = kv .iter() .enumerate() .map(|(index, _)| { let mut r = KeyCone::default(); r.set_spacelike(Block::encode_row_key(Block_ID, index as i64)); r.set_lightlike(r.get_spacelike().to_vec()); convert_to_prefix_next(r.mut_lightlike()); r }) .collect(); let store = FixtureStorage::new(kv.into_iter().collect()); // Case 1: Evcausetidx 0 + Evcausetidx 1 + Evcausetidx 2 // We should get Evcausetidx 0 and error because no further events should be scanned when there is // an error. 
{ let mut executor = BatchBlockScanFreeDaemon::new( store.clone(), Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![ key_cone_point[0].clone(), key_cone_point[1].clone(), key_cone_point[2].clone(), ], vec![], false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 1); assert!(result.physical_PrimaryCausets[0].is_decoded()); assert_eq!( result.physical_PrimaryCausets[0].decoded().to_int_vec(), &[Some(0)] ); assert!(result.physical_PrimaryCausets[1].is_raw()); result.physical_PrimaryCausets[1] .ensure_all_decoded_for_test(&mut ctx, &schemaReplicant[1]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[1].decoded().to_int_vec(), &[Some(7)] ); } // Let's also repeat case 1 for smaller batch size { let mut executor = BatchBlockScanFreeDaemon::new( store.clone(), Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![ key_cone_point[0].clone(), key_cone_point[1].clone(), key_cone_point[2].clone(), ], vec![], false, false, ) .unwrap(); let mut result = executor.next_batch(1); assert!(!result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 1); assert!(result.physical_PrimaryCausets[0].is_decoded()); assert_eq!( result.physical_PrimaryCausets[0].decoded().to_int_vec(), &[Some(0)] ); assert!(result.physical_PrimaryCausets[1].is_raw()); result.physical_PrimaryCausets[1] .ensure_all_decoded_for_test(&mut ctx, &schemaReplicant[1]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[1].decoded().to_int_vec(), &[Some(7)] ); let result = executor.next_batch(1); assert!(result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 0); } // Case 2: Evcausetidx 1 + Evcausetidx 2 // We should get 
error and no Evcausetidx, for the same reason as above. { let mut executor = BatchBlockScanFreeDaemon::new( store.clone(), Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![key_cone_point[1].clone(), key_cone_point[2].clone()], vec![], false, false, ) .unwrap(); let result = executor.next_batch(10); assert!(result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 0); } // Case 3: Evcausetidx 2 + Evcausetidx 0 // We should get Evcausetidx 2 and Evcausetidx 0. There is no error. { let mut executor = BatchBlockScanFreeDaemon::new( store.clone(), Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![key_cone_point[2].clone(), key_cone_point[0].clone()], vec![], false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(!result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 2); assert!(result.physical_PrimaryCausets[0].is_decoded()); assert_eq!( result.physical_PrimaryCausets[0].decoded().to_int_vec(), &[Some(2), Some(0)] ); assert!(result.physical_PrimaryCausets[1].is_raw()); result.physical_PrimaryCausets[1] .ensure_all_decoded_for_test(&mut ctx, &schemaReplicant[1]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[1].decoded().to_int_vec(), &[Some(5), Some(7)] ); } // Case 4: Evcausetidx 1 // We should get error. 
{ let mut executor = BatchBlockScanFreeDaemon::new( store, Arc::new(EvalConfig::default()), PrimaryCausets_info, vec![key_cone_point[1].clone()], vec![], false, false, ) .unwrap(); let result = executor.next_batch(10); assert!(result.is_drained.is_err()); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), 2); assert_eq!(result.physical_PrimaryCausets.rows_len(), 0); } } fn test_multi_handle_PrimaryCauset_impl(PrimaryCausets_is_pk: &[bool]) { const Block_ID: i64 = 42; // This test makes a pk PrimaryCauset with id = 1 and non-pk PrimaryCausets with id // in 10 to 10 + PrimaryCausets_is_pk.len(). // PK PrimaryCausets will be set to PrimaryCauset 1 and others will be set to PrimaryCauset 10 + i, where i is // the index of each PrimaryCauset. let mut PrimaryCausets_info = Vec::new(); for (i, is_pk) in PrimaryCausets_is_pk.iter().enumerate() { let mut ci = PrimaryCausetInfo::default(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_pk_handle(*is_pk); ci.set_PrimaryCauset_id(if *is_pk { 1 } else { i as i64 + 10 }); PrimaryCausets_info.push(ci); } let mut schemaReplicant = Vec::new(); schemaReplicant.resize(PrimaryCausets_is_pk.len(), FieldTypeTp::LongLong.into()); let key = Block::encode_row_key(Block_ID, 1); let col_ids = (10..10 + schemaReplicant.len() as i64).collect::<Vec<_>>(); let Evcausetidx = col_ids.iter().map(|i| Datum::I64(*i)).collect(); let value = Block::encode_row(&mut EvalContext::default(), Evcausetidx, &col_ids).unwrap(); let mut key_cone = KeyCone::default(); key_cone.set_spacelike(Block::encode_row_key(Block_ID, std::i64::MIN)); key_cone.set_lightlike(Block::encode_row_key(Block_ID, std::i64::MAX)); let store = FixtureStorage::new(iter::once((key, (Ok(value)))).collect()); let mut executor = BatchBlockScanFreeDaemon::new( store, Arc::new(EvalConfig::default()), PrimaryCausets_info, vec![key_cone], vec![], false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert_eq!(result.is_drained.unwrap(), true); 
assert_eq!(result.logical_rows.len(), 1); assert_eq!(result.physical_PrimaryCausets.PrimaryCausets_len(), PrimaryCausets_is_pk.len()); for i in 0..PrimaryCausets_is_pk.len() { result.physical_PrimaryCausets[i] .ensure_all_decoded_for_test(&mut EvalContext::default(), &schemaReplicant[i]) .unwrap(); if PrimaryCausets_is_pk[i] { assert_eq!( result.physical_PrimaryCausets[i].decoded().to_int_vec(), &[Some(1)] ); } else { assert_eq!( result.physical_PrimaryCausets[i].decoded().to_int_vec(), &[Some(i as i64 + 10)] ); } } } #[test] fn test_multi_handle_PrimaryCauset() { test_multi_handle_PrimaryCauset_impl(&[true]); test_multi_handle_PrimaryCauset_impl(&[false]); test_multi_handle_PrimaryCauset_impl(&[true, false]); test_multi_handle_PrimaryCauset_impl(&[false, true]); test_multi_handle_PrimaryCauset_impl(&[true, true]); test_multi_handle_PrimaryCauset_impl(&[true, false, true]); test_multi_handle_PrimaryCauset_impl(&[ false, false, false, true, false, false, true, true, false, false, ]); } #[derive(Copy, Clone)] struct PrimaryCauset { is_primary_PrimaryCauset: bool, has_PrimaryCauset_info: bool, } fn test_common_handle_impl(PrimaryCausets: &[PrimaryCauset]) { const Block_ID: i64 = 2333; // Prepare some Block meta data let mut PrimaryCausets_info = vec![]; let mut schemaReplicant = vec![]; let mut handle = vec![]; let mut primary_PrimaryCauset_ids = vec![]; let mut missed_PrimaryCausets_info = vec![]; let PrimaryCauset_ids = (0..PrimaryCausets.len() as i64).collect::<Vec<_>>(); let mut Evcausetidx = vec![]; for (i, &PrimaryCauset) in PrimaryCausets.iter().enumerate() { let PrimaryCauset { is_primary_PrimaryCauset, has_PrimaryCauset_info, } = PrimaryCauset; if has_PrimaryCauset_info { let mut ci = PrimaryCausetInfo::default(); ci.set_PrimaryCauset_id(i as i64); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); PrimaryCausets_info.push(ci); schemaReplicant.push(FieldTypeTp::LongLong.into()); } else { missed_PrimaryCausets_info.push(i as i64); } if 
is_primary_PrimaryCauset { handle.push(Datum::I64(i as i64)); primary_PrimaryCauset_ids.push(i as i64); } Evcausetidx.push(Datum::I64(i as i64)); } let handle = datum::encode_key(&mut EvalContext::default(), &handle).unwrap(); let key = Block::encode_common_handle_for_test(Block_ID, &handle); let value = Block::encode_row(&mut EvalContext::default(), Evcausetidx, &PrimaryCauset_ids).unwrap(); // Constructs a cone that includes the constructed key. let mut key_cone = KeyCone::default(); let begin = Block::encode_common_handle_for_test(Block_ID - 1, &handle); let lightlike = Block::encode_common_handle_for_test(Block_ID + 1, &handle); key_cone.set_spacelike(begin); key_cone.set_lightlike(lightlike); let store = FixtureStorage::new(iter::once((key, (Ok(value)))).collect()); let mut executor = BatchBlockScanFreeDaemon::new( store, Arc::new(EvalConfig::default()), PrimaryCausets_info.clone(), vec![key_cone], primary_PrimaryCauset_ids, false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert_eq!(result.is_drained.unwrap(), true); assert_eq!(result.logical_rows.len(), 1); assert_eq!( result.physical_PrimaryCausets.PrimaryCausets_len(), PrimaryCausets.len() - missed_PrimaryCausets_info.len() ); // We expect we fill the primary PrimaryCauset with the value allegro in the common handle. 
for i in 0..result.physical_PrimaryCausets.PrimaryCausets_len() { result.physical_PrimaryCausets[i] .ensure_all_decoded_for_test(&mut EvalContext::default(), &schemaReplicant[i]) .unwrap(); assert_eq!( result.physical_PrimaryCausets[i].decoded().to_int_vec(), &[Some(PrimaryCausets_info[i].get_PrimaryCauset_id())] ); } } #[test] fn test_common_handle() { test_common_handle_impl(&[PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }]); test_common_handle_impl(&[ PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }, ]); test_common_handle_impl(&[ PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }, ]); test_common_handle_impl(&[ PrimaryCauset { is_primary_PrimaryCauset: false, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, ]); test_common_handle_impl(&[ PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: false, has_PrimaryCauset_info: true, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, ]); test_common_handle_impl(&[ PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }, PrimaryCauset { is_primary_PrimaryCauset: false, has_PrimaryCauset_info: true, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: false, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: true, }, PrimaryCauset { is_primary_PrimaryCauset: true, has_PrimaryCauset_info: 
false, }, ]); } }
39.243849
145
0.548391
626dc04fbe52ef0b97b795d58c534392743c3899
2,137
use crate::{MatFile, MatioError, Result}; use std::{ fs, io, path::{Path, PathBuf}, ptr, }; /// Mat file acces modes pub enum AccessMode { ReadOnly, ReadWrite, } /// Mat file builder pub struct Builder { mat_name: PathBuf, access_mode: AccessMode, } impl Builder { /// Creates a new mat file loader object from the `path` pub fn new<P: AsRef<Path>>(path: P) -> Self { Self { mat_name: path.as_ref().to_path_buf(), access_mode: AccessMode::ReadOnly, } } /// Sets the access mode to read-only (default) pub fn read_only(self) -> Self { Self { access_mode: AccessMode::ReadOnly, ..self } } /// Sets the access mode to read-write pub fn read_write(self) -> Self { Self { access_mode: AccessMode::ReadWrite, ..self } } /// Loads a mat file pub fn load(self) -> Result<MatFile> { let attrs = fs::metadata(&self.mat_name)?; if attrs.is_file() { let mat_name = std::ffi::CString::new(self.mat_name.to_str().unwrap())?; let mat_t = unsafe { ffi::Mat_Open(mat_name.as_ptr(), self.access_mode as i32) }; if mat_t.is_null() { Err(MatioError::MatOpen( self.mat_name.to_str().unwrap().to_string(), )) } else { Ok(MatFile { mat_t }) } } else { Err(MatioError::NoFile(io::Error::new( io::ErrorKind::NotFound, format!("mat file {} not found", self.mat_name.to_str().unwrap()), ))) } } pub fn save(self) -> Result<MatFile> { let mat_name = std::ffi::CString::new(self.mat_name.to_str().unwrap())?; let mat_t = unsafe { ffi::Mat_CreateVer(mat_name.as_ptr(), ptr::null(), ffi::mat_ft_MAT_FT_MAT5) }; if mat_t.is_null() { Err(MatioError::MatOpen( self.mat_name.to_str().unwrap().to_string(), )) } else { Ok(MatFile { mat_t }) } } }
28.878378
99
0.518484
757ab428d6b6fd5978f071206069adc652a64fc3
785
extern crate rand; use std::io; use std::cmp::Ordering; use rand::Rng; fn main() { println!("Adivina el numero!"); let num_secreto = rand::thread_rng().gen_range(1..101); loop { println!("Por favor introduce un numero:"); let mut entrada_num = String::new(); io::stdin().read_line(&mut entrada_num).ok().expect("Falló al leer la linea"); let num: u32 = match entrada_num.trim().parse(){ Ok(num) => num, Err(_) => continue, }; match num.cmp(&num_secreto) { Ordering::Less => println!("Muy pequeño"), Ordering::Greater => println!("Muy grande"), Ordering::Equal => { println!("Haz ganado"); break; } } } }
25.322581
86
0.518471
7a9a7277a8e29467efc3ce91db3b9c8a935fea64
5,783
// Copyright 2019-2021 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any // person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the // Software without restriction, including without // limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software // is furnished to do so, subject to the following // conditions: // // The above copyright notice and this permission notice // shall be included in all copies or substantial portions // of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT // SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. //! Access control based on HTTP headers pub(crate) mod cors; pub(crate) mod hosts; mod matcher; use hosts::{AllowHosts, Host}; use cors::{AccessControlAllowHeaders, AccessControlAllowOrigin}; use hyper::header; use jsonrpsee_utils::http_helpers; /// Define access on control on HTTP layer. 
#[derive(Clone, Debug)] pub struct AccessControl { allow_hosts: AllowHosts, cors_allow_origin: Option<Vec<AccessControlAllowOrigin>>, cors_allow_headers: AccessControlAllowHeaders, continue_on_invalid_cors: bool, } impl AccessControl { /// Validate incoming request by http HOST pub fn deny_host(&self, request: &hyper::Request<hyper::Body>) -> bool { !hosts::is_host_valid(http_helpers::read_header_value(request.headers(), "host"), &self.allow_hosts) } /// Validate incoming request by CORS origin pub fn deny_cors_origin(&self, request: &hyper::Request<hyper::Body>) -> bool { let header = cors::get_cors_allow_origin( http_helpers::read_header_value(request.headers(), "origin"), http_helpers::read_header_value(request.headers(), "host"), &self.cors_allow_origin, ) .map(|origin| { use self::cors::AccessControlAllowOrigin::*; match origin { Value(ref val) => { header::HeaderValue::from_str(val).unwrap_or_else(|_| header::HeaderValue::from_static("null")) } Null => header::HeaderValue::from_static("null"), Any => header::HeaderValue::from_static("*"), } }); header == cors::AllowCors::Invalid && !self.continue_on_invalid_cors } /// Validate incoming request by CORS header pub fn deny_cors_header(&self, request: &hyper::Request<hyper::Body>) -> bool { let headers = request.headers().keys().map(|name| name.as_str()); let requested_headers = http_helpers::read_header_values(request.headers(), "access-control-request-headers") .filter_map(|val| val.to_str().ok()) .flat_map(|val| val.split(", ")) .flat_map(|val| val.split(',')); let header = cors::get_cors_allow_headers(headers, requested_headers, &self.cors_allow_headers, |name| { header::HeaderValue::from_str(name).unwrap_or_else(|_| header::HeaderValue::from_static("unknown")) }); header == cors::AllowCors::Invalid && !self.continue_on_invalid_cors } } impl Default for AccessControl { fn default() -> Self { Self { allow_hosts: AllowHosts::Any, cors_allow_origin: None, cors_allow_headers: AccessControlAllowHeaders::Any, 
continue_on_invalid_cors: false, } } } /// Convenience builder pattern #[derive(Debug)] pub struct AccessControlBuilder { allow_hosts: AllowHosts, cors_allow_origin: Option<Vec<AccessControlAllowOrigin>>, cors_allow_headers: AccessControlAllowHeaders, continue_on_invalid_cors: bool, } impl Default for AccessControlBuilder { fn default() -> Self { Self { allow_hosts: AllowHosts::Any, cors_allow_origin: None, cors_allow_headers: AccessControlAllowHeaders::Any, continue_on_invalid_cors: false, } } } impl AccessControlBuilder { /// Create a new builder for `AccessControl`. pub fn new() -> Self { Self::default() } /// Configure allow host. pub fn allow_host(mut self, host: Host) -> Self { let allow_hosts = match self.allow_hosts { AllowHosts::Any => vec![host], AllowHosts::Only(mut allow_hosts) => { allow_hosts.push(host); allow_hosts } }; self.allow_hosts = AllowHosts::Only(allow_hosts); self } /// Configure CORS origin. pub fn cors_allow_origin(mut self, allow_origin: AccessControlAllowOrigin) -> Self { let cors_allow_origin = match self.cors_allow_origin { Some(mut cors_allow_origin) => { cors_allow_origin.push(allow_origin); cors_allow_origin } None => vec![allow_origin], }; self.cors_allow_origin = Some(cors_allow_origin); self } /// Configure which CORS header that is allowed. pub fn cors_allow_header(mut self, header: String) -> Self { let allow_headers = match self.cors_allow_headers { AccessControlAllowHeaders::Any => vec![header], AccessControlAllowHeaders::Only(mut allow_headers) => { allow_headers.push(header); allow_headers } }; self.cors_allow_headers = AccessControlAllowHeaders::Only(allow_headers); self } /// Enable or disable to continue with invalid CORS. pub fn continue_on_invalid_cors(mut self, continue_on_invalid_cors: bool) -> Self { self.continue_on_invalid_cors = continue_on_invalid_cors; self } /// Build. 
pub fn build(self) -> AccessControl { AccessControl { allow_hosts: self.allow_hosts, cors_allow_origin: self.cors_allow_origin, cors_allow_headers: self.cors_allow_headers, continue_on_invalid_cors: self.continue_on_invalid_cors, } } }
31.950276
111
0.733875
de37055e839cc1245944fe16ce8f3607241f86e5
2,653
fn main() { // Skip pkg-config check if just generating documentation. if cfg!(doc) { return; } let lib_name = "libpulse-mainloop-glib"; let fallback_name = { #[cfg(target_os = "linux")] { "pulse-mainloop-glib::libpulse-mainloop-glib.so.0" } #[cfg(target_os = "macos")] { "pulse-mainloop-glib::libpulse-mainloop-glib.0.dylib" } #[cfg(windows)] { "pulse-mainloop-glib::libpulse-mainloop-glib-0.dll" } #[cfg(not(any(target_os = "linux", target_os = "macos", windows)))] { "pulse-mainloop-glib" } }; let min_version = "4.0"; let mut config = pkg_config::Config::new(); // Has the user got pkg-config and the PA pkg-config file installed (via dev package)? // This is a little crude, since impossible to reliably distinguish between pkg-config errors // (it only gives strings, and they could be translated). We perform a non-version specific // check here, and disable generation of cargo meta data, thus doing a 'exists' type check. config.cargo_metadata(false); let fallback = match config.probe(lib_name) { // We assume all failure here (being a non-version specific check) indicates no *.pc file Err(pkg_config::Error::ProbeFailure { .. }) | Err(pkg_config::Error::Failure { .. }) => { println!("cargo:warning=Pkg-config seems to not know about PulseAudio (dev package not installed?), \ trying generic fallback..."); true }, // Also allow fallback if pkg-config not installed, or disabled Err(pkg_config::Error::EnvNoPkgConfig(_)) | Err(pkg_config::Error::Command { .. }) => { println!("cargo:warning=No pkg-config or disabled, trying generic fallback..."); true }, // In all other cases we will perform a version-specfic check and honor the result _ => false, }; // If the user does not have pkg-config or the PA *.pc file (they have not installed the dev // package), then let’s try a default fallback (having to install dev packages for Rust // development is unnatural imo, ideally distros should start shipping *.pc files differently). 
if fallback { println!("cargo:rustc-link-lib={}", fallback_name); return; } config.cargo_metadata(true) .atleast_version(min_version); // Do version specific pkg-config check and honor result match config.probe(lib_name) { Err(e) => { println!("cargo:warning={}", e); std::process::exit(1); }, Ok(_) => {}, } }
40.815385
113
0.612891
fceb85f34bde4a7aa2bce8ea11ecf195d24b93b8
362
#[macro_export] macro_rules! hashmap { ( $( $key:expr => $value:expr, )* ) => { { let mut hm = ::std::collections::HashMap::new(); $( hm.insert($key, $value); )* hm } }; ( $( $key:expr => $value:expr ),* ) => { ::macros::hashmap!($( $key => $value, )*) }; }
22.625
60
0.367403
67feb3ff6aec3a98457c1b87efba2fdfb27e706b
166
#![feature(inner_deref)] fn main() { let _result = &Some(42).as_deref(); //~^ ERROR no method named `as_deref` found for type `std::option::Option<{integer}>` }
23.714286
85
0.650602
1a3a261e436e68e8f38441735bda0c31849fc60a
230
pub mod ast; pub mod graph; pub mod lexer; pub mod node_interner; pub mod parser; pub mod hir; pub mod hir_def; // Lexer API pub use lexer::token; //Parser API pub use parser::{ParsedModule, Parser}; //AST API pub use ast::*;
12.777778
39
0.704348
fbe55e191dd7a8b77283d1783d0a8e3415ede9ed
3,937
//! HTTP Range header

/// HTTP Range header
///
/// See <https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35>
#[allow(clippy::exhaustive_enums, missing_copy_implementations)]
#[derive(Debug, Clone)]
pub enum Range {
    /// Normal byte range (`first-` or `first-last`)
    Normal {
        /// First byte position of the range
        first: u64,
        /// Last byte position; `None` for an open-ended `first-` range
        last: Option<u64>,
    },
    /// Suffix byte range (`-last`)
    Suffix {
        /// Suffix length: the number of trailing bytes requested
        last: u64,
    },
}

/// `ParseRangeError`
#[allow(missing_copy_implementations)] // Why? See `crate::path::ParseS3PathError`.
#[derive(Debug, thiserror::Error)]
#[error("ParseRangeError")]
pub struct ParseRangeError {
    /// private place holder (private field keeps the error unconstructible
    /// outside this module)
    _priv: (),
}

impl Range {
    /// Parses `Range` from header
    ///
    /// Accepts exactly `bytes=first-last`, `bytes=first-`, or `bytes=-suffix`
    /// with no surrounding or trailing input.
    ///
    /// # Errors
    /// Returns an error if the header is invalid
    pub fn from_header_str(header: &str) -> Result<Self, ParseRangeError> {
        /// nom parser for the `bytes=...` byte-range specifier
        fn parse(input: &str) -> nom::IResult<&str, Range> {
            use nom::{
                branch::alt,
                bytes::complete::tag,
                character::complete::digit1,
                combinator::{all_consuming, map, map_res, opt},
                sequence::tuple,
            };
            // `first-` or `first-last`. `map_res` both converts the digits
            // (overflowing values fail here) and validates the range.
            let normal_parser = map_res(
                tuple((
                    map_res(digit1, str::parse::<u64>),
                    tag("-"),
                    opt(map_res(digit1, str::parse::<u64>)),
                )),
                |ss: (u64, &str, Option<u64>)| {
                    // A bounded range must satisfy first <= last.
                    if let (first, Some(last)) = (ss.0, ss.2) {
                        if first > last {
                            return Err(ParseRangeError { _priv: () });
                        }
                    }
                    Ok(Range::Normal {
                        first: ss.0,
                        last: ss.2,
                    })
                },
            );
            // `-last`: a suffix of the given length.
            let suffix_parser = map(
                tuple((tag("-"), map_res(digit1, str::parse::<u64>))),
                |ss: (&str, u64)| Range::Suffix { last: ss.1 },
            );
            // `all_consuming` rejects any input left over after the range,
            // e.g. trailing semicolons or whitespace.
            let mut parser =
                all_consuming(tuple((tag("bytes="), alt((normal_parser, suffix_parser)))));
            let (input, (_, ans)) = parser(input)?;
            Ok((input, ans))
        }
        // Collapse every nom error into the single opaque `ParseRangeError`.
        match parse(header) {
            Err(_) => Err(ParseRangeError { _priv: () }),
            Ok((_, ans)) => Ok(ans),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn byte_range() {
        // Bounded range `first-last`.
        {
            let src = "bytes=0-499";
            let result = Range::from_header_str(src);
            assert!(matches!(
                result.unwrap(),
                Range::Normal {
                    first: 0,
                    last: Some(499)
                }
            ));
        }
        // Trailing garbage is rejected by `all_consuming`.
        {
            let src = "bytes=0-499;";
            let result = Range::from_header_str(src);
            assert!(result.is_err());
        }
        // Open-ended range `first-`.
        {
            let src = "bytes=9500-";
            let result = Range::from_header_str(src);
            assert!(matches!(
                result.unwrap(),
                Range::Normal {
                    first: 9500,
                    last: None
                }
            ));
        }
        // Malformed range with an extra segment.
        {
            let src = "bytes=9500-0-";
            let result = Range::from_header_str(src);
            assert!(result.is_err());
        }
        // Suffix range `-last`.
        {
            let src = "bytes=-500";
            let result = Range::from_header_str(src);
            assert!(matches!(result.unwrap(), Range::Suffix { last: 500 }));
        }
        // Trailing whitespace is rejected.
        {
            let src = "bytes=-500 ";
            let result = Range::from_header_str(src);
            assert!(result.is_err());
        }
        // A value that overflows u64 fails inside `map_res`.
        {
            let src = "bytes=-1000000000000000000000000";
            let result = Range::from_header_str(src);
            assert!(result.is_err());
        }
    }
}
27.921986
91
0.437389
030ff085c35fe43d5f9a255784074b41b4996c77
2,559
use super::{Config, StringList, Value};
use serde::{de::Error, Deserialize};
use std::path::PathBuf;

/// A configuration string fetched via the `get` API and interpreted as a
/// `PathBuf`. Relative values are resolved against the location of the
/// config file that defined them.
#[derive(Debug, Deserialize, PartialEq, Clone)]
#[serde(transparent)]
pub struct ConfigRelativePath(Value<String>);

impl ConfigRelativePath {
    /// The raw, unresolved string value stored for this key.
    pub fn raw_value(&self) -> &str {
        self.0.val.as_str()
    }

    /// Turns this configuration-relative path into an absolute path,
    /// anchored at the location of the configuration that defined the value.
    pub fn resolve_path(&self, config: &Config) -> PathBuf {
        let anchor = self.0.definition.root(config);
        anchor.join(&self.0.val)
    }

    /// Resolves to either an absolute path or something appropriate to
    /// execute from `PATH`.
    ///
    /// Values that don't look like a filesystem path (no `/` or `\`) are
    /// returned as-is; everything else becomes an absolute path.
    pub fn resolve_program(self, config: &Config) -> PathBuf {
        config.string_to_path(self.0.val, &self.0.definition)
    }
}

/// A config type that is a program to run.
///
/// This supports a list of strings like `['/path/to/program', 'somearg']`
/// or a space separated string like `'/path/to/program somearg'`.
///
/// This expects the first value to be the path to the program to run.
/// Subsequent values are strings of arguments to pass to the program.
///
/// Typically you should use `ConfigRelativePath::resolve_program` on the path
/// to get the actual program.
#[derive(Debug, Clone)]
pub struct PathAndArgs {
    /// The program to run (first element of the configured value).
    pub path: ConfigRelativePath,
    /// Arguments passed to the program (remaining elements).
    pub args: Vec<String>,
}

impl<'de> serde::Deserialize<'de> for PathAndArgs {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Both accepted shapes (space-separated string or list of strings)
        // come in through `StringList`.
        let list = Value::<StringList>::deserialize(deserializer)?;
        let definition = list.definition;
        let mut parts = list.val.0.into_iter();

        // The first element — the program path — is mandatory.
        let program = match parts.next() {
            Some(p) => p,
            None => return Err(D::Error::invalid_length(0, &"at least one element")),
        };

        Ok(PathAndArgs {
            path: ConfigRelativePath(Value {
                val: program,
                definition,
            }),
            args: parts.collect(),
        })
    }
}
34.581081
80
0.648691
48fec71325814090cf88e111e8391f5137c00a16
59,404
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateLedgerOutput { /// <p>The name of the ledger.</p> pub name: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub arn: std::option::Option<std::string::String>, /// <p>The current status of the ledger.</p> pub state: std::option::Option<crate::model::LedgerState>, /// <p>The date and time, in epoch time format, when the ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub creation_date_time: std::option::Option<smithy_types::Instant>, /// <p>The flag that prevents a ledger from being deleted by any user. If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. 
The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub deletion_protection: std::option::Option<bool>, } impl std::fmt::Debug for UpdateLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateLedgerOutput"); formatter.field("name", &self.name); formatter.field("arn", &self.arn); formatter.field("state", &self.state); formatter.field("creation_date_time", &self.creation_date_time); formatter.field("deletion_protection", &self.deletion_protection); formatter.finish() } } /// See [`UpdateLedgerOutput`](crate::output::UpdateLedgerOutput) pub mod update_ledger_output { /// A builder for [`UpdateLedgerOutput`](crate::output::UpdateLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { name: std::option::Option<std::string::String>, arn: std::option::Option<std::string::String>, state: std::option::Option<crate::model::LedgerState>, creation_date_time: std::option::Option<smithy_types::Instant>, deletion_protection: std::option::Option<bool>, } impl Builder { /// <p>The name of the ledger.</p> pub fn name(mut self, inp: impl Into<std::string::String>) -> Self { self.name = Some(inp.into()); self } pub fn set_name(mut self, inp: std::option::Option<std::string::String>) -> Self { self.name = inp; self } /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub fn arn(mut self, inp: impl Into<std::string::String>) -> Self { self.arn = Some(inp.into()); self } pub fn set_arn(mut self, inp: std::option::Option<std::string::String>) -> Self { self.arn = inp; self } /// <p>The current status of the ledger.</p> pub fn state(mut self, inp: crate::model::LedgerState) -> Self { self.state = Some(inp); self } pub fn set_state(mut self, inp: std::option::Option<crate::model::LedgerState>) -> Self { self.state = inp; self } /// <p>The date and time, in epoch time format, when the 
ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub fn creation_date_time(mut self, inp: smithy_types::Instant) -> Self { self.creation_date_time = Some(inp); self } pub fn set_creation_date_time( mut self, inp: std::option::Option<smithy_types::Instant>, ) -> Self { self.creation_date_time = inp; self } /// <p>The flag that prevents a ledger from being deleted by any user. If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub fn deletion_protection(mut self, inp: bool) -> Self { self.deletion_protection = Some(inp); self } pub fn set_deletion_protection(mut self, inp: std::option::Option<bool>) -> Self { self.deletion_protection = inp; self } /// Consumes the builder and constructs a [`UpdateLedgerOutput`](crate::output::UpdateLedgerOutput) pub fn build(self) -> crate::output::UpdateLedgerOutput { crate::output::UpdateLedgerOutput { name: self.name, arn: self.arn, state: self.state, creation_date_time: self.creation_date_time, deletion_protection: self.deletion_protection, } } } } impl UpdateLedgerOutput { /// Creates a new builder-style object to manufacture [`UpdateLedgerOutput`](crate::output::UpdateLedgerOutput) pub fn builder() -> crate::output::update_ledger_output::Builder { crate::output::update_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UntagResourceOutput {} impl std::fmt::Debug for UntagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter 
= f.debug_struct("UntagResourceOutput"); formatter.finish() } } /// See [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub mod untag_resource_output { /// A builder for [`UntagResourceOutput`](crate::output::UntagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn build(self) -> crate::output::UntagResourceOutput { crate::output::UntagResourceOutput {} } } } impl UntagResourceOutput { /// Creates a new builder-style object to manufacture [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn builder() -> crate::output::untag_resource_output::Builder { crate::output::untag_resource_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TagResourceOutput {} impl std::fmt::Debug for TagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TagResourceOutput"); formatter.finish() } } /// See [`TagResourceOutput`](crate::output::TagResourceOutput) pub mod tag_resource_output { /// A builder for [`TagResourceOutput`](crate::output::TagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`TagResourceOutput`](crate::output::TagResourceOutput) pub fn build(self) -> crate::output::TagResourceOutput { crate::output::TagResourceOutput {} } } } impl TagResourceOutput { /// Creates a new builder-style object to manufacture [`TagResourceOutput`](crate::output::TagResourceOutput) pub fn builder() -> crate::output::tag_resource_output::Builder { crate::output::tag_resource_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub 
struct StreamJournalToKinesisOutput { /// <p>The unique ID that QLDB assigns to each QLDB journal stream.</p> pub stream_id: std::option::Option<std::string::String>, } impl std::fmt::Debug for StreamJournalToKinesisOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("StreamJournalToKinesisOutput"); formatter.field("stream_id", &self.stream_id); formatter.finish() } } /// See [`StreamJournalToKinesisOutput`](crate::output::StreamJournalToKinesisOutput) pub mod stream_journal_to_kinesis_output { /// A builder for [`StreamJournalToKinesisOutput`](crate::output::StreamJournalToKinesisOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { stream_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique ID that QLDB assigns to each QLDB journal stream.</p> pub fn stream_id(mut self, inp: impl Into<std::string::String>) -> Self { self.stream_id = Some(inp.into()); self } pub fn set_stream_id(mut self, inp: std::option::Option<std::string::String>) -> Self { self.stream_id = inp; self } /// Consumes the builder and constructs a [`StreamJournalToKinesisOutput`](crate::output::StreamJournalToKinesisOutput) pub fn build(self) -> crate::output::StreamJournalToKinesisOutput { crate::output::StreamJournalToKinesisOutput { stream_id: self.stream_id, } } } } impl StreamJournalToKinesisOutput { /// Creates a new builder-style object to manufacture [`StreamJournalToKinesisOutput`](crate::output::StreamJournalToKinesisOutput) pub fn builder() -> crate::output::stream_journal_to_kinesis_output::Builder { crate::output::stream_journal_to_kinesis_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListTagsForResourceOutput { /// <p>The tags that are currently associated with the specified Amazon QLDB resource.</p> pub tags: std::option::Option< 
std::collections::HashMap<std::string::String, std::option::Option<std::string::String>>, >, } impl std::fmt::Debug for ListTagsForResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListTagsForResourceOutput"); formatter.field("tags", &self.tags); formatter.finish() } } /// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub mod list_tags_for_resource_output { /// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { tags: std::option::Option< std::collections::HashMap< std::string::String, std::option::Option<std::string::String>, >, >, } impl Builder { pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::option::Option<std::string::String>>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, inp: std::option::Option< std::collections::HashMap< std::string::String, std::option::Option<std::string::String>, >, >, ) -> Self { self.tags = inp; self } /// Consumes the builder and constructs a [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub fn build(self) -> crate::output::ListTagsForResourceOutput { crate::output::ListTagsForResourceOutput { tags: self.tags } } } } impl ListTagsForResourceOutput { /// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub fn builder() -> crate::output::list_tags_for_resource_output::Builder { crate::output::list_tags_for_resource_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListLedgersOutput { /// <p>The array of ledger summaries that are associated with the current AWS account and /// 
Region.</p> pub ledgers: std::option::Option<std::vec::Vec<crate::model::LedgerSummary>>, /// <p>A pagination token, indicating whether there are more results available:</p> /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, then there are more /// results available. To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListLedgers</code> call.</p> /// </li> /// </ul> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListLedgersOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListLedgersOutput"); formatter.field("ledgers", &self.ledgers); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListLedgersOutput`](crate::output::ListLedgersOutput) pub mod list_ledgers_output { /// A builder for [`ListLedgersOutput`](crate::output::ListLedgersOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { ledgers: std::option::Option<std::vec::Vec<crate::model::LedgerSummary>>, next_token: std::option::Option<std::string::String>, } impl Builder { pub fn ledgers(mut self, inp: impl Into<crate::model::LedgerSummary>) -> Self { let mut v = self.ledgers.unwrap_or_default(); v.push(inp.into()); self.ledgers = Some(v); self } pub fn set_ledgers( mut self, inp: std::option::Option<std::vec::Vec<crate::model::LedgerSummary>>, ) -> Self { self.ledgers = inp; self } /// <p>A pagination token, indicating whether there are more results available:</p> /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If 
<code>NextToken</code> is <i>not</i> empty, then there are more /// results available. To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListLedgers</code> call.</p> /// </li> /// </ul> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.next_token = Some(inp.into()); self } pub fn set_next_token(mut self, inp: std::option::Option<std::string::String>) -> Self { self.next_token = inp; self } /// Consumes the builder and constructs a [`ListLedgersOutput`](crate::output::ListLedgersOutput) pub fn build(self) -> crate::output::ListLedgersOutput { crate::output::ListLedgersOutput { ledgers: self.ledgers, next_token: self.next_token, } } } } impl ListLedgersOutput { /// Creates a new builder-style object to manufacture [`ListLedgersOutput`](crate::output::ListLedgersOutput) pub fn builder() -> crate::output::list_ledgers_output::Builder { crate::output::list_ledgers_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListJournalS3ExportsForLedgerOutput { /// <p>The array of journal export job descriptions that are associated with the specified /// ledger.</p> pub journal_s3_exports: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, then there are more /// results available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListJournalS3ExportsForLedger</code> /// call.</p> /// </li> /// </ul> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListJournalS3ExportsForLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListJournalS3ExportsForLedgerOutput"); formatter.field("journal_s3_exports", &self.journal_s3_exports); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListJournalS3ExportsForLedgerOutput`](crate::output::ListJournalS3ExportsForLedgerOutput) pub mod list_journal_s3_exports_for_ledger_output { /// A builder for [`ListJournalS3ExportsForLedgerOutput`](crate::output::ListJournalS3ExportsForLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { journal_s3_exports: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, next_token: std::option::Option<std::string::String>, } impl Builder { pub fn journal_s3_exports( mut self, inp: impl Into<crate::model::JournalS3ExportDescription>, ) -> Self { let mut v = self.journal_s3_exports.unwrap_or_default(); v.push(inp.into()); self.journal_s3_exports = Some(v); self } pub fn set_journal_s3_exports( mut self, inp: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, ) -> Self { self.journal_s3_exports = inp; self } /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, then there are more /// results available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListJournalS3ExportsForLedger</code> /// call.</p> /// </li> /// </ul> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.next_token = Some(inp.into()); self } pub fn set_next_token(mut self, inp: std::option::Option<std::string::String>) -> Self { self.next_token = inp; self } /// Consumes the builder and constructs a [`ListJournalS3ExportsForLedgerOutput`](crate::output::ListJournalS3ExportsForLedgerOutput) pub fn build(self) -> crate::output::ListJournalS3ExportsForLedgerOutput { crate::output::ListJournalS3ExportsForLedgerOutput { journal_s3_exports: self.journal_s3_exports, next_token: self.next_token, } } } } impl ListJournalS3ExportsForLedgerOutput { /// Creates a new builder-style object to manufacture [`ListJournalS3ExportsForLedgerOutput`](crate::output::ListJournalS3ExportsForLedgerOutput) pub fn builder() -> crate::output::list_journal_s3_exports_for_ledger_output::Builder { crate::output::list_journal_s3_exports_for_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListJournalS3ExportsOutput { /// <p>The array of journal export job descriptions for all ledgers that are associated with /// the current AWS account and Region.</p> pub journal_s3_exports: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, then there are more /// results available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListJournalS3Exports</code> /// call.</p> /// </li> /// </ul> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListJournalS3ExportsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListJournalS3ExportsOutput"); formatter.field("journal_s3_exports", &self.journal_s3_exports); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListJournalS3ExportsOutput`](crate::output::ListJournalS3ExportsOutput) pub mod list_journal_s3_exports_output { /// A builder for [`ListJournalS3ExportsOutput`](crate::output::ListJournalS3ExportsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { journal_s3_exports: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, next_token: std::option::Option<std::string::String>, } impl Builder { pub fn journal_s3_exports( mut self, inp: impl Into<crate::model::JournalS3ExportDescription>, ) -> Self { let mut v = self.journal_s3_exports.unwrap_or_default(); v.push(inp.into()); self.journal_s3_exports = Some(v); self } pub fn set_journal_s3_exports( mut self, inp: std::option::Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, ) -> Self { self.journal_s3_exports = inp; self } /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, then the last page of results has been /// processed and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, then there are more /// results available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent <code>ListJournalS3Exports</code> /// call.</p> /// </li> /// </ul> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.next_token = Some(inp.into()); self } pub fn set_next_token(mut self, inp: std::option::Option<std::string::String>) -> Self { self.next_token = inp; self } /// Consumes the builder and constructs a [`ListJournalS3ExportsOutput`](crate::output::ListJournalS3ExportsOutput) pub fn build(self) -> crate::output::ListJournalS3ExportsOutput { crate::output::ListJournalS3ExportsOutput { journal_s3_exports: self.journal_s3_exports, next_token: self.next_token, } } } } impl ListJournalS3ExportsOutput { /// Creates a new builder-style object to manufacture [`ListJournalS3ExportsOutput`](crate::output::ListJournalS3ExportsOutput) pub fn builder() -> crate::output::list_journal_s3_exports_output::Builder { crate::output::list_journal_s3_exports_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListJournalKinesisStreamsForLedgerOutput { /// <p>The array of QLDB journal stream descriptors that are associated with the given /// ledger.</p> pub streams: std::option::Option<std::vec::Vec<crate::model::JournalKinesisStreamDescription>>, /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, the last page of results has been processed /// and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, more results are /// available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent /// <code>ListJournalKinesisStreamsForLedger</code> call.</p> /// </li> /// </ul> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListJournalKinesisStreamsForLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListJournalKinesisStreamsForLedgerOutput"); formatter.field("streams", &self.streams); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListJournalKinesisStreamsForLedgerOutput`](crate::output::ListJournalKinesisStreamsForLedgerOutput) pub mod list_journal_kinesis_streams_for_ledger_output { /// A builder for [`ListJournalKinesisStreamsForLedgerOutput`](crate::output::ListJournalKinesisStreamsForLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { streams: std::option::Option<std::vec::Vec<crate::model::JournalKinesisStreamDescription>>, next_token: std::option::Option<std::string::String>, } impl Builder { pub fn streams( mut self, inp: impl Into<crate::model::JournalKinesisStreamDescription>, ) -> Self { let mut v = self.streams.unwrap_or_default(); v.push(inp.into()); self.streams = Some(v); self } pub fn set_streams( mut self, inp: std::option::Option<std::vec::Vec<crate::model::JournalKinesisStreamDescription>>, ) -> Self { self.streams = inp; self } /// <ul> /// <li> /// <p>If <code>NextToken</code> is empty, the last page of results has been processed /// and there are no more results to be retrieved.</p> /// </li> /// <li> /// <p>If <code>NextToken</code> is <i>not</i> empty, more results are /// available. 
To retrieve the next page of results, use the value of /// <code>NextToken</code> in a subsequent /// <code>ListJournalKinesisStreamsForLedger</code> call.</p> /// </li> /// </ul> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.next_token = Some(inp.into()); self } pub fn set_next_token(mut self, inp: std::option::Option<std::string::String>) -> Self { self.next_token = inp; self } /// Consumes the builder and constructs a [`ListJournalKinesisStreamsForLedgerOutput`](crate::output::ListJournalKinesisStreamsForLedgerOutput) pub fn build(self) -> crate::output::ListJournalKinesisStreamsForLedgerOutput { crate::output::ListJournalKinesisStreamsForLedgerOutput { streams: self.streams, next_token: self.next_token, } } } } impl ListJournalKinesisStreamsForLedgerOutput { /// Creates a new builder-style object to manufacture [`ListJournalKinesisStreamsForLedgerOutput`](crate::output::ListJournalKinesisStreamsForLedgerOutput) pub fn builder() -> crate::output::list_journal_kinesis_streams_for_ledger_output::Builder { crate::output::list_journal_kinesis_streams_for_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetRevisionOutput { /// <p>The proof object in Amazon Ion format returned by a <code>GetRevision</code> request. 
A /// proof contains the list of hash values that are required to recalculate the specified /// digest using a Merkle tree, starting with the specified document revision.</p> pub proof: std::option::Option<crate::model::ValueHolder>, /// <p>The document revision data object in Amazon Ion format.</p> pub revision: std::option::Option<crate::model::ValueHolder>, } impl std::fmt::Debug for GetRevisionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetRevisionOutput"); formatter.field("proof", &"*** Sensitive Data Redacted ***"); formatter.field("revision", &"*** Sensitive Data Redacted ***"); formatter.finish() } } /// See [`GetRevisionOutput`](crate::output::GetRevisionOutput) pub mod get_revision_output { /// A builder for [`GetRevisionOutput`](crate::output::GetRevisionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { proof: std::option::Option<crate::model::ValueHolder>, revision: std::option::Option<crate::model::ValueHolder>, } impl Builder { /// <p>The proof object in Amazon Ion format returned by a <code>GetRevision</code> request. 
A /// proof contains the list of hash values that are required to recalculate the specified /// digest using a Merkle tree, starting with the specified document revision.</p> pub fn proof(mut self, inp: crate::model::ValueHolder) -> Self { self.proof = Some(inp); self } pub fn set_proof(mut self, inp: std::option::Option<crate::model::ValueHolder>) -> Self { self.proof = inp; self } /// <p>The document revision data object in Amazon Ion format.</p> pub fn revision(mut self, inp: crate::model::ValueHolder) -> Self { self.revision = Some(inp); self } pub fn set_revision(mut self, inp: std::option::Option<crate::model::ValueHolder>) -> Self { self.revision = inp; self } /// Consumes the builder and constructs a [`GetRevisionOutput`](crate::output::GetRevisionOutput) pub fn build(self) -> crate::output::GetRevisionOutput { crate::output::GetRevisionOutput { proof: self.proof, revision: self.revision, } } } } impl GetRevisionOutput { /// Creates a new builder-style object to manufacture [`GetRevisionOutput`](crate::output::GetRevisionOutput) pub fn builder() -> crate::output::get_revision_output::Builder { crate::output::get_revision_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetDigestOutput { /// <p>The 256-bit hash value representing the digest returned by a <code>GetDigest</code> /// request.</p> pub digest: std::option::Option<smithy_types::Blob>, /// <p>The latest block location covered by the digest that you requested. 
An address is an /// Amazon Ion structure that has two fields: <code>strandId</code> and /// <code>sequenceNo</code>.</p> pub digest_tip_address: std::option::Option<crate::model::ValueHolder>, } impl std::fmt::Debug for GetDigestOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetDigestOutput"); formatter.field("digest", &self.digest); formatter.field("digest_tip_address", &"*** Sensitive Data Redacted ***"); formatter.finish() } } /// See [`GetDigestOutput`](crate::output::GetDigestOutput) pub mod get_digest_output { /// A builder for [`GetDigestOutput`](crate::output::GetDigestOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { digest: std::option::Option<smithy_types::Blob>, digest_tip_address: std::option::Option<crate::model::ValueHolder>, } impl Builder { /// <p>The 256-bit hash value representing the digest returned by a <code>GetDigest</code> /// request.</p> pub fn digest(mut self, inp: smithy_types::Blob) -> Self { self.digest = Some(inp); self } pub fn set_digest(mut self, inp: std::option::Option<smithy_types::Blob>) -> Self { self.digest = inp; self } /// <p>The latest block location covered by the digest that you requested. 
An address is an /// Amazon Ion structure that has two fields: <code>strandId</code> and /// <code>sequenceNo</code>.</p> pub fn digest_tip_address(mut self, inp: crate::model::ValueHolder) -> Self { self.digest_tip_address = Some(inp); self } pub fn set_digest_tip_address( mut self, inp: std::option::Option<crate::model::ValueHolder>, ) -> Self { self.digest_tip_address = inp; self } /// Consumes the builder and constructs a [`GetDigestOutput`](crate::output::GetDigestOutput) pub fn build(self) -> crate::output::GetDigestOutput { crate::output::GetDigestOutput { digest: self.digest, digest_tip_address: self.digest_tip_address, } } } } impl GetDigestOutput { /// Creates a new builder-style object to manufacture [`GetDigestOutput`](crate::output::GetDigestOutput) pub fn builder() -> crate::output::get_digest_output::Builder { crate::output::get_digest_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetBlockOutput { /// <p>The block data object in Amazon Ion format.</p> pub block: std::option::Option<crate::model::ValueHolder>, /// <p>The proof object in Amazon Ion format returned by a <code>GetBlock</code> request. 
A /// proof contains the list of hash values required to recalculate the specified digest using a /// Merkle tree, starting with the specified block.</p> pub proof: std::option::Option<crate::model::ValueHolder>, } impl std::fmt::Debug for GetBlockOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetBlockOutput"); formatter.field("block", &"*** Sensitive Data Redacted ***"); formatter.field("proof", &"*** Sensitive Data Redacted ***"); formatter.finish() } } /// See [`GetBlockOutput`](crate::output::GetBlockOutput) pub mod get_block_output { /// A builder for [`GetBlockOutput`](crate::output::GetBlockOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { block: std::option::Option<crate::model::ValueHolder>, proof: std::option::Option<crate::model::ValueHolder>, } impl Builder { /// <p>The block data object in Amazon Ion format.</p> pub fn block(mut self, inp: crate::model::ValueHolder) -> Self { self.block = Some(inp); self } pub fn set_block(mut self, inp: std::option::Option<crate::model::ValueHolder>) -> Self { self.block = inp; self } /// <p>The proof object in Amazon Ion format returned by a <code>GetBlock</code> request. 
A /// proof contains the list of hash values required to recalculate the specified digest using a /// Merkle tree, starting with the specified block.</p> pub fn proof(mut self, inp: crate::model::ValueHolder) -> Self { self.proof = Some(inp); self } pub fn set_proof(mut self, inp: std::option::Option<crate::model::ValueHolder>) -> Self { self.proof = inp; self } /// Consumes the builder and constructs a [`GetBlockOutput`](crate::output::GetBlockOutput) pub fn build(self) -> crate::output::GetBlockOutput { crate::output::GetBlockOutput { block: self.block, proof: self.proof, } } } } impl GetBlockOutput { /// Creates a new builder-style object to manufacture [`GetBlockOutput`](crate::output::GetBlockOutput) pub fn builder() -> crate::output::get_block_output::Builder { crate::output::get_block_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ExportJournalToS3Output { /// <p>The unique ID that QLDB assigns to each journal export job.</p> /// <p>To describe your export request and check the status of the job, you can use /// <code>ExportId</code> to call <code>DescribeJournalS3Export</code>.</p> pub export_id: std::option::Option<std::string::String>, } impl std::fmt::Debug for ExportJournalToS3Output { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ExportJournalToS3Output"); formatter.field("export_id", &self.export_id); formatter.finish() } } /// See [`ExportJournalToS3Output`](crate::output::ExportJournalToS3Output) pub mod export_journal_to_s3_output { /// A builder for [`ExportJournalToS3Output`](crate::output::ExportJournalToS3Output) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { export_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique ID that QLDB assigns to each journal export job.</p> /// <p>To describe your export request and 
check the status of the job, you can use /// <code>ExportId</code> to call <code>DescribeJournalS3Export</code>.</p> pub fn export_id(mut self, inp: impl Into<std::string::String>) -> Self { self.export_id = Some(inp.into()); self } pub fn set_export_id(mut self, inp: std::option::Option<std::string::String>) -> Self { self.export_id = inp; self } /// Consumes the builder and constructs a [`ExportJournalToS3Output`](crate::output::ExportJournalToS3Output) pub fn build(self) -> crate::output::ExportJournalToS3Output { crate::output::ExportJournalToS3Output { export_id: self.export_id, } } } } impl ExportJournalToS3Output { /// Creates a new builder-style object to manufacture [`ExportJournalToS3Output`](crate::output::ExportJournalToS3Output) pub fn builder() -> crate::output::export_journal_to_s3_output::Builder { crate::output::export_journal_to_s3_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeLedgerOutput { /// <p>The name of the ledger.</p> pub name: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub arn: std::option::Option<std::string::String>, /// <p>The current status of the ledger.</p> pub state: std::option::Option<crate::model::LedgerState>, /// <p>The date and time, in epoch time format, when the ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub creation_date_time: std::option::Option<smithy_types::Instant>, /// <p>The flag that prevents a ledger from being deleted by any user. If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. 
The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub deletion_protection: std::option::Option<bool>, } impl std::fmt::Debug for DescribeLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeLedgerOutput"); formatter.field("name", &self.name); formatter.field("arn", &self.arn); formatter.field("state", &self.state); formatter.field("creation_date_time", &self.creation_date_time); formatter.field("deletion_protection", &self.deletion_protection); formatter.finish() } } /// See [`DescribeLedgerOutput`](crate::output::DescribeLedgerOutput) pub mod describe_ledger_output { /// A builder for [`DescribeLedgerOutput`](crate::output::DescribeLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { name: std::option::Option<std::string::String>, arn: std::option::Option<std::string::String>, state: std::option::Option<crate::model::LedgerState>, creation_date_time: std::option::Option<smithy_types::Instant>, deletion_protection: std::option::Option<bool>, } impl Builder { /// <p>The name of the ledger.</p> pub fn name(mut self, inp: impl Into<std::string::String>) -> Self { self.name = Some(inp.into()); self } pub fn set_name(mut self, inp: std::option::Option<std::string::String>) -> Self { self.name = inp; self } /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub fn arn(mut self, inp: impl Into<std::string::String>) -> Self { self.arn = Some(inp.into()); self } pub fn set_arn(mut self, inp: std::option::Option<std::string::String>) -> Self { self.arn = inp; self } /// <p>The current status of the ledger.</p> pub fn state(mut self, inp: crate::model::LedgerState) -> Self { self.state = Some(inp); self } pub fn set_state(mut self, inp: std::option::Option<crate::model::LedgerState>) -> Self { self.state = inp; self } /// <p>The date and time, in epoch time 
format, when the ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub fn creation_date_time(mut self, inp: smithy_types::Instant) -> Self { self.creation_date_time = Some(inp); self } pub fn set_creation_date_time( mut self, inp: std::option::Option<smithy_types::Instant>, ) -> Self { self.creation_date_time = inp; self } /// <p>The flag that prevents a ledger from being deleted by any user. If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub fn deletion_protection(mut self, inp: bool) -> Self { self.deletion_protection = Some(inp); self } pub fn set_deletion_protection(mut self, inp: std::option::Option<bool>) -> Self { self.deletion_protection = inp; self } /// Consumes the builder and constructs a [`DescribeLedgerOutput`](crate::output::DescribeLedgerOutput) pub fn build(self) -> crate::output::DescribeLedgerOutput { crate::output::DescribeLedgerOutput { name: self.name, arn: self.arn, state: self.state, creation_date_time: self.creation_date_time, deletion_protection: self.deletion_protection, } } } } impl DescribeLedgerOutput { /// Creates a new builder-style object to manufacture [`DescribeLedgerOutput`](crate::output::DescribeLedgerOutput) pub fn builder() -> crate::output::describe_ledger_output::Builder { crate::output::describe_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeJournalS3ExportOutput { /// <p>Information about the journal export job returned by a /// 
<code>DescribeJournalS3Export</code> request.</p> pub export_description: std::option::Option<crate::model::JournalS3ExportDescription>, } impl std::fmt::Debug for DescribeJournalS3ExportOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeJournalS3ExportOutput"); formatter.field("export_description", &self.export_description); formatter.finish() } } /// See [`DescribeJournalS3ExportOutput`](crate::output::DescribeJournalS3ExportOutput) pub mod describe_journal_s3_export_output { /// A builder for [`DescribeJournalS3ExportOutput`](crate::output::DescribeJournalS3ExportOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { export_description: std::option::Option<crate::model::JournalS3ExportDescription>, } impl Builder { /// <p>Information about the journal export job returned by a /// <code>DescribeJournalS3Export</code> request.</p> pub fn export_description(mut self, inp: crate::model::JournalS3ExportDescription) -> Self { self.export_description = Some(inp); self } pub fn set_export_description( mut self, inp: std::option::Option<crate::model::JournalS3ExportDescription>, ) -> Self { self.export_description = inp; self } /// Consumes the builder and constructs a [`DescribeJournalS3ExportOutput`](crate::output::DescribeJournalS3ExportOutput) pub fn build(self) -> crate::output::DescribeJournalS3ExportOutput { crate::output::DescribeJournalS3ExportOutput { export_description: self.export_description, } } } } impl DescribeJournalS3ExportOutput { /// Creates a new builder-style object to manufacture [`DescribeJournalS3ExportOutput`](crate::output::DescribeJournalS3ExportOutput) pub fn builder() -> crate::output::describe_journal_s3_export_output::Builder { crate::output::describe_journal_s3_export_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct 
DescribeJournalKinesisStreamOutput { /// <p>Information about the QLDB journal stream returned by a /// <code>DescribeJournalS3Export</code> request.</p> pub stream: std::option::Option<crate::model::JournalKinesisStreamDescription>, } impl std::fmt::Debug for DescribeJournalKinesisStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeJournalKinesisStreamOutput"); formatter.field("stream", &self.stream); formatter.finish() } } /// See [`DescribeJournalKinesisStreamOutput`](crate::output::DescribeJournalKinesisStreamOutput) pub mod describe_journal_kinesis_stream_output { /// A builder for [`DescribeJournalKinesisStreamOutput`](crate::output::DescribeJournalKinesisStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { stream: std::option::Option<crate::model::JournalKinesisStreamDescription>, } impl Builder { /// <p>Information about the QLDB journal stream returned by a /// <code>DescribeJournalS3Export</code> request.</p> pub fn stream(mut self, inp: crate::model::JournalKinesisStreamDescription) -> Self { self.stream = Some(inp); self } pub fn set_stream( mut self, inp: std::option::Option<crate::model::JournalKinesisStreamDescription>, ) -> Self { self.stream = inp; self } /// Consumes the builder and constructs a [`DescribeJournalKinesisStreamOutput`](crate::output::DescribeJournalKinesisStreamOutput) pub fn build(self) -> crate::output::DescribeJournalKinesisStreamOutput { crate::output::DescribeJournalKinesisStreamOutput { stream: self.stream, } } } } impl DescribeJournalKinesisStreamOutput { /// Creates a new builder-style object to manufacture [`DescribeJournalKinesisStreamOutput`](crate::output::DescribeJournalKinesisStreamOutput) pub fn builder() -> crate::output::describe_journal_kinesis_stream_output::Builder { crate::output::describe_journal_kinesis_stream_output::Builder::default() } } 
#[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteLedgerOutput {} impl std::fmt::Debug for DeleteLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteLedgerOutput"); formatter.finish() } } /// See [`DeleteLedgerOutput`](crate::output::DeleteLedgerOutput) pub mod delete_ledger_output { /// A builder for [`DeleteLedgerOutput`](crate::output::DeleteLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`DeleteLedgerOutput`](crate::output::DeleteLedgerOutput) pub fn build(self) -> crate::output::DeleteLedgerOutput { crate::output::DeleteLedgerOutput {} } } } impl DeleteLedgerOutput { /// Creates a new builder-style object to manufacture [`DeleteLedgerOutput`](crate::output::DeleteLedgerOutput) pub fn builder() -> crate::output::delete_ledger_output::Builder { crate::output::delete_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateLedgerOutput { /// <p>The name of the ledger.</p> pub name: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub arn: std::option::Option<std::string::String>, /// <p>The current status of the ledger.</p> pub state: std::option::Option<crate::model::LedgerState>, /// <p>The date and time, in epoch time format, when the ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub creation_date_time: std::option::Option<smithy_types::Instant>, /// <p>The flag that prevents a ledger from being deleted by any user. 
If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub deletion_protection: std::option::Option<bool>, } impl std::fmt::Debug for CreateLedgerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateLedgerOutput"); formatter.field("name", &self.name); formatter.field("arn", &self.arn); formatter.field("state", &self.state); formatter.field("creation_date_time", &self.creation_date_time); formatter.field("deletion_protection", &self.deletion_protection); formatter.finish() } } /// See [`CreateLedgerOutput`](crate::output::CreateLedgerOutput) pub mod create_ledger_output { /// A builder for [`CreateLedgerOutput`](crate::output::CreateLedgerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { name: std::option::Option<std::string::String>, arn: std::option::Option<std::string::String>, state: std::option::Option<crate::model::LedgerState>, creation_date_time: std::option::Option<smithy_types::Instant>, deletion_protection: std::option::Option<bool>, } impl Builder { /// <p>The name of the ledger.</p> pub fn name(mut self, inp: impl Into<std::string::String>) -> Self { self.name = Some(inp.into()); self } pub fn set_name(mut self, inp: std::option::Option<std::string::String>) -> Self { self.name = inp; self } /// <p>The Amazon Resource Name (ARN) for the ledger.</p> pub fn arn(mut self, inp: impl Into<std::string::String>) -> Self { self.arn = Some(inp.into()); self } pub fn set_arn(mut self, inp: 
std::option::Option<std::string::String>) -> Self { self.arn = inp; self } /// <p>The current status of the ledger.</p> pub fn state(mut self, inp: crate::model::LedgerState) -> Self { self.state = Some(inp); self } pub fn set_state(mut self, inp: std::option::Option<crate::model::LedgerState>) -> Self { self.state = inp; self } /// <p>The date and time, in epoch time format, when the ledger was created. (Epoch time format /// is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)</p> pub fn creation_date_time(mut self, inp: smithy_types::Instant) -> Self { self.creation_date_time = Some(inp); self } pub fn set_creation_date_time( mut self, inp: std::option::Option<smithy_types::Instant>, ) -> Self { self.creation_date_time = inp; self } /// <p>The flag that prevents a ledger from being deleted by any user. If not provided on /// ledger creation, this feature is enabled (<code>true</code>) by default.</p> /// <p>If deletion protection is enabled, you must first disable it before you can delete the /// ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the /// <code>UpdateLedger</code> operation to set the flag to <code>false</code>. 
The QLDB /// console disables deletion protection for you when you use it to delete a ledger.</p> pub fn deletion_protection(mut self, inp: bool) -> Self { self.deletion_protection = Some(inp); self } pub fn set_deletion_protection(mut self, inp: std::option::Option<bool>) -> Self { self.deletion_protection = inp; self } /// Consumes the builder and constructs a [`CreateLedgerOutput`](crate::output::CreateLedgerOutput) pub fn build(self) -> crate::output::CreateLedgerOutput { crate::output::CreateLedgerOutput { name: self.name, arn: self.arn, state: self.state, creation_date_time: self.creation_date_time, deletion_protection: self.deletion_protection, } } } } impl CreateLedgerOutput { /// Creates a new builder-style object to manufacture [`CreateLedgerOutput`](crate::output::CreateLedgerOutput) pub fn builder() -> crate::output::create_ledger_output::Builder { crate::output::create_ledger_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CancelJournalKinesisStreamOutput { /// <p>The unique ID that QLDB assigns to each QLDB journal stream.</p> pub stream_id: std::option::Option<std::string::String>, } impl std::fmt::Debug for CancelJournalKinesisStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CancelJournalKinesisStreamOutput"); formatter.field("stream_id", &self.stream_id); formatter.finish() } } /// See [`CancelJournalKinesisStreamOutput`](crate::output::CancelJournalKinesisStreamOutput) pub mod cancel_journal_kinesis_stream_output { /// A builder for [`CancelJournalKinesisStreamOutput`](crate::output::CancelJournalKinesisStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { stream_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique ID that QLDB assigns to each QLDB journal stream.</p> pub fn stream_id(mut self, inp: 
impl Into<std::string::String>) -> Self { self.stream_id = Some(inp.into()); self } pub fn set_stream_id(mut self, inp: std::option::Option<std::string::String>) -> Self { self.stream_id = inp; self } /// Consumes the builder and constructs a [`CancelJournalKinesisStreamOutput`](crate::output::CancelJournalKinesisStreamOutput) pub fn build(self) -> crate::output::CancelJournalKinesisStreamOutput { crate::output::CancelJournalKinesisStreamOutput { stream_id: self.stream_id, } } } } impl CancelJournalKinesisStreamOutput { /// Creates a new builder-style object to manufacture [`CancelJournalKinesisStreamOutput`](crate::output::CancelJournalKinesisStreamOutput) pub fn builder() -> crate::output::cancel_journal_kinesis_stream_output::Builder { crate::output::cancel_journal_kinesis_stream_output::Builder::default() } }
45.520307
159
0.635614
9b319ae9b9cdbe2ff3805ea2e6fed3776c419b0e
4,749
// Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 use crate::{ backup_types::transaction::{ backup::{TransactionBackupController, TransactionBackupOpt}, restore::{TransactionRestoreController, TransactionRestoreOpt}, }, storage::{local_fs::LocalFs, BackupStorage}, utils::{ backup_service_client::BackupServiceClient, test_utils::{start_local_backup_service, tmp_db_with_random_content}, ConcurrentDownloadsOpt, GlobalBackupOpt, GlobalRestoreOpt, RocksdbOpt, TrustedWaypointOpt, }, }; use aptos_temppath::TempPath; use aptos_types::transaction::Version; use aptosdb::AptosDB; use std::{convert::TryInto, mem::size_of, sync::Arc}; use storage_interface::DbReader; use tokio::time::Duration; #[test] fn end_to_end() { let (_src_db_dir, src_db, blocks) = tmp_db_with_random_content(); let tgt_db_dir = TempPath::new(); tgt_db_dir.create_as_dir().unwrap(); let backup_dir = TempPath::new(); backup_dir.create_as_dir().unwrap(); let store: Arc<dyn BackupStorage> = Arc::new(LocalFs::new(backup_dir.path().to_path_buf())); let (rt, port) = start_local_backup_service(src_db); let client = Arc::new(BackupServiceClient::new(format!( "http://localhost:{}", port ))); let latest_version = blocks.last().unwrap().1.ledger_info().version(); let total_txns = blocks.iter().fold(0, |x, b| x + b.0.len()); assert_eq!(latest_version as usize + 1, total_txns); let txns = blocks .iter() .map(|(txns, _li)| txns) .flatten() .map(|txn_to_commit| txn_to_commit.transaction()) .collect::<Vec<_>>(); let max_chunk_size = txns .iter() .map(|t| bcs::to_bytes(t).unwrap().len()) .max() .unwrap() // biggest txn + 115 // size of a serialized TransactionInfo + size_of::<u32>(); // record len header let first_ver_to_backup = (total_txns / 4) as Version; let num_txns_to_backup = total_txns - first_ver_to_backup as usize; let target_version = first_ver_to_backup + total_txns as Version / 2; let num_txns_to_restore = (target_version - first_ver_to_backup + 1) as usize; let manifest_handle = rt .block_on( 
TransactionBackupController::new( TransactionBackupOpt { start_version: first_ver_to_backup, num_transactions: num_txns_to_backup, }, GlobalBackupOpt { max_chunk_size }, client, Arc::clone(&store), ) .run(), ) .unwrap(); rt.block_on( TransactionRestoreController::new( TransactionRestoreOpt { manifest_handle, replay_from_version: None, // max }, GlobalRestoreOpt { dry_run: false, db_dir: Some(tgt_db_dir.path().to_path_buf()), target_version: Some(target_version), trusted_waypoints: TrustedWaypointOpt::default(), rocksdb_opt: RocksdbOpt::default(), concurernt_downloads: ConcurrentDownloadsOpt::default(), } .try_into() .unwrap(), store, None, /* epoch_history */ ) .run(), ) .unwrap(); // We don't write down any ledger infos when recovering transactions. State-sync needs to take // care of it before running consensus. The latest transactions are deemed "synced" instead of // "committed" most likely. let tgt_db = AptosDB::new_for_test(&tgt_db_dir); assert_eq!( tgt_db .get_latest_transaction_info_option() .unwrap() .unwrap() .0, target_version, ); let recovered_transactions = tgt_db .get_transactions( first_ver_to_backup, num_txns_to_restore as u64, target_version, true, /* fetch_events */ ) .unwrap(); assert_eq!( recovered_transactions.transactions, txns.into_iter() .skip(first_ver_to_backup as usize) .take(num_txns_to_restore) .cloned() .collect::<Vec<_>>() ); assert_eq!( recovered_transactions.events.unwrap(), blocks .iter() .map(|(txns, _li)| { txns.iter() .map(|txn_to_commit| txn_to_commit.events().to_vec()) }) .flatten() .skip(first_ver_to_backup as usize) .take(num_txns_to_restore) .collect::<Vec<_>>() ); rt.shutdown_timeout(Duration::from_secs(1)); }
33.20979
98
0.587703
03435c50c0e2f34df730dbf20cc4f52a445d9035
2,825
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use crossterm::style::Color; use printer::{ buffer::Buffer, printer::{default_process_fn, PrintQueue, Printer, PrinterItem}, Result, }; fn main() -> Result<()> { let mut printer = Printer::new(std::io::stdout(), "In: ".into()); printer.print_prompt_if_set()?; std::io::Write::flush(&mut printer.writer.raw)?; let mut buffer = Buffer::new(); loop { let inp = crossterm::event::read()?; match inp { crossterm::event::Event::Key(key) => match key { KeyEvent { code: KeyCode::Char(c), modifiers: KeyModifiers::NONE, } => { buffer.insert(c); printer.print_input(&default_process_fn, &buffer)?; printer.cursor.move_right_unbounded(); } KeyEvent { code: KeyCode::Backspace, .. } => { if !buffer.is_at_start() { buffer.move_backward(); printer.cursor.move_left(); buffer.remove_current_char(); printer.print_input(&default_process_fn, &buffer)?; } } KeyEvent { code: KeyCode::Enter, .. } => { if let Some(mut output) = eval(buffer.to_string()) { output.push_front(PrinterItem::NewLine); printer.print_output(output)?; } buffer.clear(); printer.print_prompt_if_set()?; } KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, } => break, _ => (), }, _ => (), } std::io::Write::flush(&mut printer.writer.raw)?; } Ok(()) } fn eval(buffer: String) -> Option<PrintQueue> { let mut buffer = buffer.split_whitespace(); let cmd = buffer.next()?; let args: Vec<&str> = buffer.collect(); match (|| -> Result<PrinterItem> { let output = std::process::Command::new(cmd).args(args).output()?; if output.status.success() { Ok(PrinterItem::String( String::from_utf8(output.stdout)?, Color::Blue, )) } else { Ok(PrinterItem::String( String::from_utf8(output.stderr)?, Color::Red, )) } })() { Ok(result) => Some(result.into()), Err(e) => Some(PrinterItem::String(e.to_string(), Color::Red).into()), } }
32.471264
78
0.450973
ffc2ba11ec3e3d0db79873665bc9b1d29c288de5
1,147
use crate::prelude::{RaknetPacket, RaknetPacketData}; #[derive(Debug)] pub struct OpenConnectionRequestTwo { pub magic: [u8; 16], pub server_ip_bytes: [u8; 4], pub port: u16, pub mtu: u16, pub client_guid: i64, } impl RaknetPacketData for OpenConnectionRequestTwo { fn decode(reader: &mut impl crate::prelude::Reader) -> Option<Self> { Some(Self { magic: <[u8; 16]>::decode(reader)?, server_ip_bytes: { reader.next()?; <[u8; 4]>::decode(reader)? }, port: u16::decode(reader)?, mtu: u16::decode(reader)?, client_guid: i64::decode(reader)?, }) } fn encode(&self, writer: &mut impl crate::prelude::Writer) -> Option<()> { self.magic.encode(writer)?; writer.write(4)?; self.server_ip_bytes.encode(writer)?; self.port.encode(writer)?; self.mtu.encode(writer)?; self.client_guid.encode(writer) } } impl RaknetPacket for OpenConnectionRequestTwo { const RANGE: std::ops::Range<u8> = 0x07..0x08; fn id(&self) -> u8 { 0x07 } }
26.674419
78
0.567568
d56ccea735f72669d8c4945d69c336115322e98a
13,528
use rand::Rng; use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; use alloc::vec; use alloc::vec::Vec; use crate::errors::{Error, Result}; use crate::hash::Hash; use crate::key::{self, PrivateKey, PublicKey}; // Encrypts the given message with RSA and the padding // scheme from PKCS#1 v1.5. The message must be no longer than the // length of the public modulus minus 11 bytes. #[inline] pub fn encrypt<R: Rng, PK: PublicKey>(rng: &mut R, pub_key: &PK, msg: &[u8]) -> Result<Vec<u8>> { key::check_public(pub_key)?; let k = pub_key.size(); if msg.len() > k - 11 { return Err(Error::MessageTooLong); } // EM = 0x00 || 0x02 || PS || 0x00 || M let mut em = vec![0u8; k]; em[1] = 2; non_zero_random_bytes(rng, &mut em[2..k - msg.len() - 1]); em[k - msg.len() - 1] = 0; em[k - msg.len()..].copy_from_slice(msg); pub_key.raw_encryption_primitive(&em, pub_key.size()) } /// Decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5. // If an `rng` is passed, it uses RSA blinding to avoid timing side-channel attacks. // // Note that whether this function returns an error or not discloses secret // information. If an attacker can cause this function to run repeatedly and // learn whether each instance returned an error then they can decrypt and // forge signatures as if they had the private key. See // `decrypt_session_key` for a way of solving this problem. #[inline] pub fn decrypt<R: Rng, SK: PrivateKey>( rng: Option<&mut R>, priv_key: &SK, ciphertext: &[u8], ) -> Result<Vec<u8>> { key::check_public(priv_key)?; let (valid, out, index) = decrypt_inner(rng, priv_key, ciphertext)?; if valid == 0 { return Err(Error::Decryption); } Ok(out[index as usize..].to_vec()) } // Calculates the signature of hashed using // RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5. Note that `hashed` must // be the result of hashing the input message using the given hash // function. If hash is `None`, hashed is signed directly. This isn't // advisable except for interoperability. 
// // If `rng` is not `None` then RSA blinding will be used to avoid timing // side-channel attacks. // // This function is deterministic. Thus, if the set of possible // messages is small, an attacker may be able to build a map from // messages to signatures and identify the signed messages. As ever, // signatures provide authenticity, not confidentiality. #[inline] pub fn sign<R: Rng, SK: PrivateKey>( rng: Option<&mut R>, priv_key: &SK, hash: Option<&Hash>, hashed: &[u8], ) -> Result<Vec<u8>> { let (hash_len, prefix) = hash_info(hash, hashed.len())?; let t_len = prefix.len() + hash_len; let k = priv_key.size(); if k < t_len + 11 { return Err(Error::MessageTooLong); } // EM = 0x00 || 0x01 || PS || 0x00 || T let mut em = vec![0xff; k]; em[0] = 0; em[1] = 1; em[k - t_len - 1] = 0; em[k - t_len..k - hash_len].copy_from_slice(&prefix); em[k - hash_len..k].copy_from_slice(hashed); priv_key.raw_decryption_primitive(rng, &em, priv_key.size()) } /// Verifies an RSA PKCS#1 v1.5 signature. #[inline] pub fn verify<PK: PublicKey>( pub_key: &PK, hash: Option<&Hash>, hashed: &[u8], sig: &[u8], ) -> Result<()> { let (hash_len, prefix) = hash_info(hash, hashed.len())?; let t_len = prefix.len() + hash_len; let k = pub_key.size(); if k < t_len + 11 { return Err(Error::Verification); } let em = pub_key.raw_encryption_primitive(sig, pub_key.size())?; // EM = 0x00 || 0x01 || PS || 0x00 || T let mut ok = em[0].ct_eq(&0u8); ok &= em[1].ct_eq(&1u8); ok &= em[k - hash_len..k].ct_eq(hashed); ok &= em[k - t_len..k - hash_len].ct_eq(&prefix); ok &= em[k - t_len - 1].ct_eq(&0u8); for el in em.iter().skip(2).take(k - t_len - 3) { ok &= el.ct_eq(&0xff) } if ok.unwrap_u8() != 1 { return Err(Error::Verification); } Ok(()) } #[inline] fn hash_info(hash: Option<&Hash>, digest_len: usize) -> Result<(usize, &'static [u8])> { match hash { Some(hash) => { let hash_len = hash.size(); if digest_len != hash_len { return Err(Error::InputNotHashed); } Ok((hash_len, hash.asn1_prefix())) } // this means the 
data is signed directly None => Ok((digest_len, &[])), } } /// Decrypts ciphertext using `priv_key` and blinds the operation if /// `rng` is given. It returns one or zero in valid that indicates whether the /// plaintext was correctly structured. In either case, the plaintext is /// returned in em so that it may be read independently of whether it was valid /// in order to maintain constant memory access patterns. If the plaintext was /// valid then index contains the index of the original message in em. #[inline] fn decrypt_inner<R: Rng, SK: PrivateKey>( rng: Option<&mut R>, priv_key: &SK, ciphertext: &[u8], ) -> Result<(u8, Vec<u8>, u32)> { let k = priv_key.size(); if k < 11 { return Err(Error::Decryption); } let em = priv_key.raw_decryption_primitive(rng, ciphertext, priv_key.size())?; let first_byte_is_zero = em[0].ct_eq(&0u8); let second_byte_is_two = em[1].ct_eq(&2u8); // The remainder of the plaintext must be a string of non-zero random // octets, followed by a 0, followed by the message. // looking_for_index: 1 iff we are still looking for the zero. // index: the offset of the first zero byte. let mut looking_for_index = 1u8; let mut index = 0u32; for (i, el) in em.iter().enumerate().skip(2) { let equals0 = el.ct_eq(&0u8); index.conditional_assign(&(i as u32), Choice::from(looking_for_index) & equals0); looking_for_index.conditional_assign(&0u8, equals0); } // The PS padding must be at least 8 bytes long, and it starts two // bytes into em. // TODO: WARNING: THIS MUST BE CONSTANT TIME CHECK: // Ref: https://github.com/dalek-cryptography/subtle/issues/20 // This is currently copy & paste from the constant time impl in // go, but very likely not sufficient. 
let valid_ps = Choice::from((((2i32 + 8i32 - index as i32 - 1i32) >> 31) & 1) as u8); let valid = first_byte_is_zero & second_byte_is_two & Choice::from(!looking_for_index & 1) & valid_ps; index = u32::conditional_select(&0, &(index + 1), valid); Ok((valid.unwrap_u8(), em, index)) } /// Fills the provided slice with random values, which are guranteed /// to not be zero. #[inline] fn non_zero_random_bytes<R: Rng>(rng: &mut R, data: &mut [u8]) { rng.fill(data); for el in data { if *el == 0u8 { // TODO: break after a certain amount of time while *el == 0u8 { *el = rng.gen(); } } } } #[cfg(test)] mod tests { use super::*; use base64; use hex; use num_bigint::BigUint; use num_traits::FromPrimitive; use num_traits::Num; use rand::{SeedableRng, rngs::StdRng}; use sha1::{Digest, Sha1}; use std::time::SystemTime; use crate::{Hash, PaddingScheme, PublicKey, PublicKeyParts, RSAPrivateKey, RSAPublicKey}; #[test] fn test_non_zero_bytes() { for _ in 0..10 { let seed = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let mut rng = StdRng::seed_from_u64(seed.as_secs()); let mut b = vec![0u8; 512]; non_zero_random_bytes(&mut rng, &mut b); for el in &b { assert_ne!(*el, 0u8); } } } fn get_private_key() -> RSAPrivateKey { // In order to generate new test vectors you'll need the PEM form of this key: // -----BEGIN RSA PRIVATE KEY----- // MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0 // fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu // /ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu // RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/ // EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A // IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS // tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V // -----END RSA PRIVATE KEY----- RSAPrivateKey::from_components( 
BigUint::from_str_radix("9353930466774385905609975137998169297361893554149986716853295022578535724979677252958524466350471210367835187480748268864277464700638583474144061408845077", 10).unwrap(), BigUint::from_u64(65537).unwrap(), BigUint::from_str_radix("7266398431328116344057699379749222532279343923819063639497049039389899328538543087657733766554155839834519529439851673014800261285757759040931985506583861", 10).unwrap(), vec![ BigUint::from_str_radix("98920366548084643601728869055592650835572950932266967461790948584315647051443",10).unwrap(), BigUint::from_str_radix("94560208308847015747498523884063394671606671904944666360068158221458669711639", 10).unwrap() ], ) } #[test] fn test_decrypt_pkcs1v15() { let priv_key = get_private_key(); let tests = [[ "gIcUIoVkD6ATMBk/u/nlCZCCWRKdkfjCgFdo35VpRXLduiKXhNz1XupLLzTXAybEq15juc+EgY5o0DHv/nt3yg==", "x", ], [ "Y7TOCSqofGhkRb+jaVRLzK8xw2cSo1IVES19utzv6hwvx+M8kFsoWQm5DzBeJCZTCVDPkTpavUuEbgp8hnUGDw==", "testing.", ], [ "arReP9DJtEVyV2Dg3dDp4c/PSk1O6lxkoJ8HcFupoRorBZG+7+1fDAwT1olNddFnQMjmkb8vxwmNMoTAT/BFjQ==", "testing.\n", ], [ "WtaBXIoGC54+vH0NH0CHHE+dRDOsMc/6BrfFu2lEqcKL9+uDuWaf+Xj9mrbQCjjZcpQuX733zyok/jsnqe/Ftw==", "01234567890123456789012345678901234567890123456789012", ]]; for test in &tests { let out = priv_key .decrypt( PaddingScheme::new_pkcs1v15_encrypt(), &base64::decode(test[0]).unwrap(), ) .unwrap(); assert_eq!(out, test[1].as_bytes()); } } #[test] fn test_encrypt_decrypt_pkcs1v15() { let seed = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let mut rng = StdRng::seed_from_u64(seed.as_secs()); let priv_key = get_private_key(); let k = priv_key.size(); for i in 1..100 { let mut input: Vec<u8> = (0..i * 8).map(|_| rng.gen()).collect(); if input.len() > k - 11 { input = input[0..k - 11].to_vec(); } let pub_key: RSAPublicKey = priv_key.clone().into(); let ciphertext = encrypt(&mut rng, &pub_key, &input).unwrap(); assert_ne!(input, ciphertext); let blind: bool = rng.gen(); let blinder = if blind { 
Some(&mut rng) } else { None }; let plaintext = decrypt(blinder, &priv_key, &ciphertext).unwrap(); assert_eq!(input, plaintext); } } #[test] fn test_sign_pkcs1v15() { let priv_key = get_private_key(); let tests = [[ "Test.\n", "a4f3fa6ea93bcdd0c57be020c1193ecbfd6f200a3d95c409769b029578fa0e336ad9a347600e40d3ae823b8c7e6bad88cc07c1d54c3a1523cbbb6d58efc362ae" ]]; for test in &tests { let digest = Sha1::digest(test[0].as_bytes()).to_vec(); let expected = hex::decode(test[1]).unwrap(); let out = priv_key .sign(PaddingScheme::new_pkcs1v15_sign(Some(Hash::SHA1)), &digest) .unwrap(); assert_ne!(out, digest); assert_eq!(out, expected); let seed = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let mut rng = StdRng::seed_from_u64(seed.as_secs()); let out2 = priv_key .sign_blinded( &mut rng, PaddingScheme::new_pkcs1v15_sign(Some(Hash::SHA1)), &digest, ) .unwrap(); assert_eq!(out2, expected); } } #[test] fn test_verify_pkcs1v15() { let priv_key = get_private_key(); let tests = [[ "Test.\n", "a4f3fa6ea93bcdd0c57be020c1193ecbfd6f200a3d95c409769b029578fa0e336ad9a347600e40d3ae823b8c7e6bad88cc07c1d54c3a1523cbbb6d58efc362ae" ]]; let pub_key: RSAPublicKey = priv_key.into(); for test in &tests { let digest = Sha1::digest(test[0].as_bytes()).to_vec(); let sig = hex::decode(test[1]).unwrap(); pub_key .verify( PaddingScheme::new_pkcs1v15_sign(Some(Hash::SHA1)), &digest, &sig, ) .expect("failed to verify"); } } #[test] fn test_unpadded_signature() { let msg = b"Thu Dec 19 18:06:16 EST 2013\n"; let expected_sig = base64::decode("pX4DR8azytjdQ1rtUiC040FjkepuQut5q2ZFX1pTjBrOVKNjgsCDyiJDGZTCNoh9qpXYbhl7iEym30BWWwuiZg==").unwrap(); let priv_key = get_private_key(); let sig = priv_key .sign(PaddingScheme::new_pkcs1v15_sign(None), msg) .unwrap(); assert_eq!(expected_sig, sig); let pub_key: RSAPublicKey = priv_key.into(); pub_key .verify(PaddingScheme::new_pkcs1v15_sign(None), msg, &sig) .expect("failed to verify"); } }
35.137662
207
0.619826
f478b70a7903bf87b41b7ff4cae4c1aa40f94c70
556
pub use std::borrow::Cow; pub use std::collections::BTreeMap; pub use std::collections::HashMap; pub use std::collections::LinkedList; pub use std::ffi::OsStr; pub use std::fmt::Debug; pub use std::fmt::Error as FmtError; pub use std::fmt::Formatter; pub use std::mem; pub use std::os::unix::ffi::OsStrExt; pub use std::path::Path; pub use std::path::PathBuf; pub use byteorder::ByteOrder; pub use byteorder::LittleEndian; pub mod flate2 { pub use flate2::*; } pub mod minilzo { pub use minilzo::*; } pub use output::Output; pub use super::*;
19.857143
37
0.701439
0a2abbe313acd8a6e974dcb111ca79f8cbabf517
1,412
use anyhow::Result; pub fn get_config_path(extension_name: &str) -> Result<std::path::PathBuf> { let config_paths = crate::config::Paths::new()?; Ok(config_paths.extensions_directory.join(format!( "{extension_name}.json", extension_name = extension_name ))) } pub fn filter_results<'a, 'b, T>( extensions: &'a Vec<Box<dyn openfare_lib::extension::Extension>>, results: &'b Vec<Result<T>>, ) -> Result<Vec<(&'a Box<dyn openfare_lib::extension::Extension>, &'b T)>> { let mut filtered_results = vec![]; for (extension, result) in extensions.iter().zip(results.iter()) { log::debug!( "Inspecting result from extension: {name} ({version})", name = extension.name(), version = extension.version() ); let result = match result { Ok(result) => { log::debug!( "Found Ok result from extension: {name}", name = extension.name(), ); result } Err(error) => { log::error!( "Extension {name} error: {error}", name = extension.name(), error = error ); continue; } }; filtered_results.push((extension.clone(), result.clone())); } Ok(filtered_results) }
32.090909
76
0.510623
22ab41020a49e6060c70c000d70f2b1d7c5adad6
4,188
//! This library is used to define types that implement the trait `FiniteElement` //! //! The types are defined by procedural macros. To create a new type, one must first define an //! unit-like Structure and implement the trait `AutoImplementable` for it. Once the trait //! `AutoImplementable` has been implemented it is possible to define a proc macro that will //! generate the deffinition of a new type and an implementation of the trait `FiniteElement` for //! it. //! //! # Creation of a new macro //! //! To create a new macro, one must first define a Zero-Sized `struct`, and implement the trait //! `AutoImplementable` for it. For example if we want to create an element representing a spring, //! we first create a zero-sized `struct`: `pub struct _Spring{}` and the implement the trait //! `Autotimplementable` for it. //! //! The types that implement the trait `AutoImplementable` can be passed as type argument to the //! function `macro_def<F: Float, T: AutoImplementable<F>>() -> TokenStream`. This function //! can be used to define a procedural macro that will generate the code defining the //! corresponding structure and its implementation of the trait `FiniteElement`. //! Complete example //! (copied-pasted from `spring.rs`) //! ``` //! //!use crate::formal::{Formal, FormalVector, FormalPoint, Float}; //!use crate::autoimplementable::AutoImplementable; //! //!use std::collections::HashMap; //! //!// A `Spring` likes it when `a` and `b` are at distance `l`, and //!// exerts a force of `k.(|ab| - l)` to achieve this. //!pub struct _Spring{} //! //!impl<F: Float> AutoImplementable<F> for _Spring { //! fn struct_name() -> String { //! String::from("Spring") //! } //! //! fn elt_list() -> Vec<String> { //! vec![String::from("a"), String::from("b")] //! } //! //! fn cst_list() -> Vec<String> { //! vec![String::from("l"), String::from("k")] //! } //! //! fn formal_map() -> HashMap<String, FormalVector<F>> { //! 
//Create a `Formal` for each element coordiate and each constants //! let point_a = FormalPoint { //! x: Formal::new_var(0), //! y: Formal::new_var(1), //! z: Formal::new_var(2) //! }; //! //! let point_b = FormalPoint { //! x: Formal::new_var(3), //! y: Formal::new_var(4), //! z: Formal::new_var(5) //! }; //! //! let cst_l = Formal::new_var(6); //! let cst_k = Formal::new_var(7); //! //! // The force applied on point a is k(|ab| - l) * ab/|ab| //! let ab = point_b - point_a; //! let force_a: FormalVector<F> = (ab.clone().norm() - cst_l.clone()) * ab.clone()/ab.clone().norm() * cst_k.clone(); //! //! // The force applied on point b is k(|ba| - l) * ba/|ba| //! let force_b = (ab.clone().norm() - cst_l.clone()) * ab.clone()/ab.clone().norm() * cst_k.clone() * Formal::new_cst(F::one().neg()); //! let mut ret = HashMap::new(); //! ret.insert(String::from("a"), force_a); //! ret.insert(String::from("b"), force_b); //! ret //! } //! //!} //! //!// Once the trait is implemented, we can write a procedural macro //!#[proc_macro] //!pub fn auto_impl_spring(_item: TokenStream) -> TokenStream { //! macro_def::<f32, _Spring>() //!} //! ``` //! //! //! extern crate proc_macro; /// Defines the trait `AutoImplementable` mod autoimplementable; /// Defines a type to represent mathematical formulas whose derivatives can be computed /// automatically mod formal; #[doc(hidden)] mod spring; #[doc(hidden)] mod stack; pub (crate) use autoimplementable::*; use proc_macro::TokenStream; use spring::_Spring; use stack::_Stack; /// A `Spring` likes it when `a` and `b` are at distance `l`, and /// exerts a force of `k.(|ab| - l)` to achieve this. #[proc_macro] pub fn auto_impl_spring(_item: TokenStream) -> TokenStream { macro_def::<f32, _Spring>() } /// A `Stack` likes it when the angle between `ab` and `cd` is equal to `theta0`, and /// exerts a torque of `k.(theta - theta0)` to achieve this. 
#[proc_macro] pub fn auto_impl_stack(_item: TokenStream) -> TokenStream { macro_def::<f32, _Stack<f32>>() }
35.193277
142
0.621538
ace27fdc2cc983fa688edae4cc5e6257e2ef4dd5
3,760
use std::borrow::Cow; use std::convert::{TryFrom, TryInto}; #[derive(Debug)] pub struct Url(url::Url); impl TryFrom<String> for Url { type Error = url::ParseError; fn try_from(value: String) -> Result<Self, Self::Error> { (&value).try_into() } } impl<'s> TryFrom<&'s str> for Url { type Error = url::ParseError; fn try_from(value: &'s str) -> Result<Self, Self::Error> { Ok(Url(value.parse()?)) } } impl<'s> TryFrom<&'s String> for Url { type Error = url::ParseError; fn try_from(value: &'s String) -> Result<Self, Self::Error> { (value.as_str()).try_into() } } impl Url { #[allow(dead_code)] pub(crate) fn as_str(&self) -> &str { self.0.as_str() } pub fn host(&self) -> Option<&str> { match self.0.host_str()? { "" => None, host => Some(host), } } pub fn port(&self, default: u16) -> u16 { self.0.port().unwrap_or(default) } pub fn username(&self) -> Option<Cow<str>> { let username = self.0.username(); if username.is_empty() { None } else { Some( percent_encoding::percent_decode_str(username) .decode_utf8() .expect("percent-encoded username contained non-UTF-8 bytes"), ) } } pub fn password(&self) -> Option<Cow<str>> { match self.0.password() { Some(s) => { let decoded = percent_encoding::percent_decode_str(s); // FIXME: Handle error Some( decoded .decode_utf8() .expect("percent-encoded password contained non-UTF-8 bytes"), ) } None => None, } } /// Undo URL percent-encoding and return [authority]path[query] /// /// Mostly a hack to fix special-character handling for SQLite as its connection string is a /// file path and not _really_ a URL pub fn path_decoded(&self) -> Cow<str> { // omit scheme (e.g. `sqlite://`, `mysql://`) let url_str = &self.0.as_str()[self.0.scheme().len()..] 
.trim_start_matches(':') .trim_start_matches("//"); // decode percent_encoding::percent_decode_str(url_str) .decode_utf8() .expect("percent-encoded path contained non-UTF-8 bytes") } pub fn database(&self) -> Option<&str> { let database = self.0.path().trim_start_matches('/'); if database.is_empty() { None } else { Some(database) } } pub fn param(&self, key: &str) -> Option<Cow<str>> { self.0 .query_pairs() .find_map(|(key_, val)| if key == key_ { Some(val) } else { None }) } } #[cfg(test)] mod tests { use super::*; #[test] fn azure_connection_string_username_unencoded() { let connection_string = "postgres://username@servername:[email protected]/db"; let url = Url::try_from(connection_string).expect("Failed to parse URL"); assert_eq!( url.username().map(|u| u.to_string()), Some(String::from("username@servername")) ); } #[test] fn azure_connection_string_username_encoded() { let connection_string = "postgres://username%40servername:[email protected]/db"; let url = Url::try_from(connection_string).expect("Failed to parse URL"); assert_eq!( url.username().map(|u| u.to_string()), Some(String::from("username@servername")) ); } }
26.666667
96
0.534574
7a2ed750eedbd92f128466f45de37cf080784b17
1,671
//! Basic tests for the `run` subcommand use anyhow::bail; use std::process::Command; use wasmer_integration_tests_cli::{ASSET_PATH, C_ASSET_PATH, WASMER_PATH}; fn wasi_test_wasm_path() -> String { format!("{}/{}", C_ASSET_PATH, "qjs.wasm") } fn test_no_imports_wat_path() -> String { format!("{}/{}", ASSET_PATH, "fib.wat") } #[test] fn run_wasi_works() -> anyhow::Result<()> { let output = Command::new(WASMER_PATH) .arg("run") .arg(wasi_test_wasm_path()) .arg("--") .arg("-e") .arg("print(3 * (4 + 5))") .output()?; if !output.status.success() { bail!( "linking failed with: stdout: {}\n\nstderr: {}", std::str::from_utf8(&output.stdout) .expect("stdout is not utf8! need to handle arbitrary bytes"), std::str::from_utf8(&output.stderr) .expect("stderr is not utf8! need to handle arbitrary bytes") ); } let stdout_output = std::str::from_utf8(&output.stdout).unwrap(); assert_eq!(stdout_output, "27\n"); Ok(()) } #[test] fn run_no_imports_wasm_works() -> anyhow::Result<()> { let output = Command::new(WASMER_PATH) .arg("run") .arg(test_no_imports_wat_path()) .output()?; if !output.status.success() { bail!( "linking failed with: stdout: {}\n\nstderr: {}", std::str::from_utf8(&output.stdout) .expect("stdout is not utf8! need to handle arbitrary bytes"), std::str::from_utf8(&output.stderr) .expect("stderr is not utf8! need to handle arbitrary bytes") ); } Ok(()) }
27.85
78
0.564931
f9e622f25ba4df55b4ea0f4987448041bf05d3e4
807
#[derive(PartialEq)] enum size_enum { BIG, MEDIUM, SMALL } fn to_str(t: size_enum) -> String { if t == size_enum::BIG { return "BIG".to_string(); } else if t == size_enum::MEDIUM { return "MEDIUM".to_string(); } else { return "SMALL".to_string(); } } fn main() { const MAX: usize = 100; let mut arr: [i64; MAX] = [0; MAX]; let mut i: usize = 0; let mut ii: i64 = 0; while i < MAX { arr[i] = ii; i += 1; ii += 1; } let mut t: size_enum; for x in arr { if x < 33 { t = size_enum::SMALL; } else if x < 66 { t = size_enum::MEDIUM; } else { t = size_enum::BIG; } println!("{} is {}", x, to_str(t)); } }
18.767442
43
0.44114
fef772541b2ab9963b916ecfe0e4f7033a1639a0
82
mod search; fn main() { let size = 1000000; search::search_last_in_list(size); }
11.714286
34
0.707317
1c8d67cc001b624f4e37aa85c2387d5edd81ece0
398
fn buy_groceries(shopping_list: &mut Vec<&str>) { println!("Going out to buy: {:?}", shopping_list); shopping_list.remove(0); println!("Couldn't find: {:?}", shopping_list); } fn main() { let mut shopping_list: Vec<&str> = vec!["Pasta", "Milk", "Toilet Paper"]; buy_groceries(&mut shopping_list); shopping_list.push("Chocolate"); buy_groceries(&mut shopping_list); }
26.533333
77
0.650754
261414d49c63a3b83faff883379ba181421981ab
28,809
// TODO: do not perform S-box and MDS mixing on first round's `state[0]`. // TODO: perform MDS mixing exclusively on `state[1]` in the last round. const WIDTH: usize = 3; #[allow(non_upper_case_globals)] const R_f: usize = 3; const R_P: usize = 2; const R: usize = 2 * R_f + R_P; use halo2::{ arithmetic::{Field, FieldExt}, circuit::{layouter::SingleChipLayouter, Cell, Chip, Layouter}, dev::{MockProver, VerifyFailure}, pasta::Fp, plonk::{ Advice, Assignment, Circuit, Column, ConstraintSystem, Error, Expression, Fixed, Instance, Permutation, Selector, }, poly::Rotation, }; use lazy_static::lazy_static; use rand::{thread_rng, Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; lazy_static! { /* static ref PRE_KEYS: Vec<Fp> = rand_pre_keys([1; 32]); static ref POST_KEYS: Vec<Vec<Fp>> = rand_post_keys([2; 32]); static ref MDS: Vec<Vec<Fp>> = rand_matrix([3; 32]); static ref PRE_SPARSE: Vec<Vec<Fp>> = rand_matrix([4; 32]); static ref SPARSE: Vec<Vec<Vec<Fp>>> = (0..R_P) .map(|i| rand_matrix([5 + i as u8; 32])) .collect(); */ static ref PRE_KEYS: Vec<Fp> = vec![Fp::from(1), Fp::from(2), Fp::from(3)]; static ref POST_KEYS: Vec<Vec<Fp>> = vec![ vec![Fp::from(1), Fp::from(2), Fp::from(3)], vec![Fp::from(4), Fp::from(5), Fp::from(6)], vec![Fp::from(7), Fp::from(8), Fp::from(9)], vec![Fp::from(1)], vec![Fp::from(2)], vec![Fp::from(3), Fp::from(4), Fp::from(5)], vec![Fp::from(6), Fp::from(7), Fp::from(8)], vec![], ]; static ref MDS: Vec<Vec<Fp>> = vec![ vec![Fp::from(1), Fp::from(2), Fp::from(3)], vec![Fp::from(4), Fp::from(5), Fp::from(6)], vec![Fp::from(7), Fp::from(8), Fp::from(9)], ]; static ref PRE_SPARSE: Vec<Vec<Fp>> = vec![ vec![Fp::from(3), Fp::from(4), Fp::from(5)], vec![Fp::from(6), Fp::from(7), Fp::from(8)], vec![Fp::from(9), Fp::from(1), Fp::from(2)], ]; static ref SPARSE: Vec<Vec<Vec<Fp>>> = vec![ vec![ vec![Fp::from(5), Fp::from(6), Fp::from(7)], vec![Fp::from(8), Fp::from(9), Fp::from(1)], vec![Fp::from(2), Fp::from(3), Fp::from(4)], ], vec![ vec![Fp::from(7), 
Fp::from(8), Fp::from(9)], vec![Fp::from(1), Fp::from(2), Fp::from(3)], vec![Fp::from(4), Fp::from(5), Fp::from(6)], ], ]; } fn rand_pre_keys(seed: [u8; 32]) -> Vec<Fp> { let mut rng = ChaCha8Rng::from_seed(seed); (0..WIDTH) .map(|_| { // Fp::random(&mut rng) Fp::from(rng.gen::<u64>()) }) .collect() } fn rand_post_keys(seed: [u8; 32]) -> Vec<Vec<Fp>> { let mut rng = ChaCha8Rng::from_seed(seed); (0..R) .map(|round| { if is_full_round(round) && round != R - 1 { // (0..WIDTH).map(|_| Fp::random(&mut rng)).collect() (0..WIDTH).map(|_| Fp::from(rng.gen::<u64>())).collect() } else if is_partial_round(round) { // vec![Fp::random(&mut rng)] vec![Fp::from(rng.gen::<u64>())] } else { vec![] } }) .collect() } fn rand_matrix(seed: [u8; 32]) -> Vec<Vec<Fp>> { let mut rng = ChaCha8Rng::from_seed(seed); (0..WIDTH) // .map(|_| (0..WIDTH).map(|_| Fp::random(&mut rng)).collect()) .map(|_| (0..WIDTH).map(|_| Fp::from(rng.gen::<u64>())).collect()) .collect() } fn is_full_round(round: usize) -> bool { round < R_f || round >= R_f + R_P } fn is_partial_round(round: usize) -> bool { round >= R_f && round < R_f + R_P } fn pow5(x: Fp) -> Fp { x.square().square() * x } fn sbox_pre_post(state: &[Fp]) -> Vec<Fp> { (0..WIDTH) .map(|i| pow5(state[i] + PRE_KEYS[i]) + POST_KEYS[0][i]) .collect() } fn sbox_post(state: &[Fp], post_keys: &[Fp]) -> Vec<Fp> { (0..WIDTH) .map(|i| pow5(state[i]) + post_keys[i]) .collect() } fn sbox_no_add(state: &[Fp]) -> Vec<Fp> { (0..WIDTH) .map(|i| pow5(state[i])) .collect() } fn vec_matrix_mul(v: &[Fp], m: &[Vec<Fp>]) -> Vec<Fp> { let n = v.len(); assert_eq!(m.len(), n); (0..n) .map(|col| { let mut lc = Fp::zero(); for i in 0..n { lc += v[i] * m[i][col]; } lc }) .collect() } #[derive(Clone, Debug)] struct Alloc { cell: Cell, value: Fp, } #[derive(Clone, Debug)] enum MaybeAlloc { Allocated(Alloc), Unallocated(Fp), } impl MaybeAlloc { fn value(&self) -> Fp { match self { MaybeAlloc::Allocated(alloc) => alloc.value.clone(), MaybeAlloc::Unallocated(value) => 
value.clone(), } } fn cell(&self) -> Cell { match self { MaybeAlloc::Allocated(alloc) => alloc.cell.clone(), MaybeAlloc::Unallocated(_) => panic!("called `MaybeAlloc.cell()` on an unallocated value"), } } fn is_allocated(&self) -> bool { match self { MaybeAlloc::Allocated(_) => true, MaybeAlloc::Unallocated(_) => false, } } fn is_unallocated(&self) -> bool { !self.is_allocated() } } struct PoseidonChip { config: PoseidonChipConfig, } #[derive(Clone, Debug)] struct PoseidonChipConfig { a_col: Column<Advice>, sbox_out_col: Column<Advice>, mds_out_col: Column<Advice>, pre_key_col: Column<Fixed>, post_key_col: Column<Fixed>, mds_cols: Vec<Column<Fixed>>, s_sbox_pre_post: Selector, s_sbox_post: Selector, s_sbox_no_add: Selector, s_mds: Vec<Selector>, perm_output_to_input: Permutation, perm_output_to_sbox_output: Permutation, } impl Chip<Fp> for PoseidonChip { type Config = PoseidonChipConfig; type Loaded = (); fn config(&self) -> &Self::Config { &self.config } fn loaded(&self) -> &Self::Loaded { &() } } impl PoseidonChip { fn new(config: PoseidonChipConfig) -> Self { PoseidonChip { config } } fn configure(cs: &mut ConstraintSystem<Fp>, digest_col: Column<Advice>) -> PoseidonChipConfig { let a_col = cs.advice_column(); let sbox_out_col = cs.advice_column(); let mds_out_col = digest_col; let pre_key_col = cs.fixed_column(); let post_key_col = cs.fixed_column(); let mds_cols = vec![cs.fixed_column(), cs.fixed_column(), cs.fixed_column()]; let s_sbox_pre_post = cs.selector(); let s_sbox_post = cs.selector(); let s_sbox_no_add = cs.selector(); let s_mds = vec![cs.selector(), cs.selector(), cs.selector()]; cs.create_gate("s_sbox_pre_post", |cs| { let a = cs.query_advice(a_col, Rotation::cur()); let pre_key = cs.query_fixed(pre_key_col, Rotation::cur()); let post_key = cs.query_fixed(post_key_col, Rotation::cur()); let sbox_out = cs.query_advice(sbox_out_col, Rotation::cur()); let s_sbox_pre_post = cs.query_selector(s_sbox_pre_post, Rotation::cur()); // (a + pre_key)^5 + 
post_key = out let a_plus_pre = a + pre_key; s_sbox_pre_post * ( a_plus_pre.clone() * a_plus_pre.clone() * a_plus_pre.clone() * a_plus_pre.clone() * a_plus_pre + post_key - sbox_out ) }); cs.create_gate("s_sbox_post", |cs| { let a = cs.query_advice(a_col, Rotation::cur()); let post_key = cs.query_fixed(post_key_col, Rotation::cur()); let sbox_out = cs.query_advice(sbox_out_col, Rotation::cur()); let s_sbox_post = cs.query_selector(s_sbox_post, Rotation::cur()); // a^5 + post_key = b s_sbox_post * (a.clone() * a.clone() * a.clone() * a.clone() * a + post_key - sbox_out) }); cs.create_gate("s_sbox_no_add", |cs| { let a = cs.query_advice(a_col, Rotation::cur()); let sbox_out = cs.query_advice(sbox_out_col, Rotation::cur()); let s_sbox_no_add = cs.query_selector(s_sbox_no_add, Rotation::cur()); // a^5 = b s_sbox_no_add * (a.clone() * a.clone() * a.clone() * a.clone() * a - sbox_out) }); // Calculates the dot product of the sbox outputs with column `0` of the MDS matrix. Note // that `s_mds_0` is enabled in the first MDS row. cs.create_gate("s_mds_0", |cs| { let sbox_out_0 = cs.query_advice(sbox_out_col, Rotation::cur()); let sbox_out_1 = cs.query_advice(sbox_out_col, Rotation::next()); let sbox_out_2 = cs.query_advice(sbox_out_col, Rotation(2)); let mds_out_0 = cs.query_advice(mds_out_col, Rotation::cur()); let s_mds_0 = cs.query_selector(s_mds[0], Rotation::cur()); // The first MDS column. let m_0 = cs.query_fixed(mds_cols[0], Rotation::cur()); let m_1 = cs.query_fixed(mds_cols[0], Rotation::next()); let m_2 = cs.query_fixed(mds_cols[0], Rotation(2)); // Dot product of sbox outputs with the first MDS column. s_mds_0 * (sbox_out_0 * m_0 + sbox_out_1 * m_1 + sbox_out_2 * m_2 - mds_out_0) }); // Calculates the dot product of the sbox outputs with column `1` of the MDS matrix. Note // that `s_mds_1` is enabled in the second MDS row. 
cs.create_gate("s_mds_1", |cs| { let sbox_out_0 = cs.query_advice(sbox_out_col, Rotation::prev()); let sbox_out_1 = cs.query_advice(sbox_out_col, Rotation::cur()); let sbox_out_2 = cs.query_advice(sbox_out_col, Rotation::next()); let mds_out_1 = cs.query_advice(mds_out_col, Rotation::cur()); let s_mds_1 = cs.query_selector(s_mds[1], Rotation::cur()); // The second MDS column. let m_0 = cs.query_fixed(mds_cols[1], Rotation::prev()); let m_1 = cs.query_fixed(mds_cols[1], Rotation::cur()); let m_2 = cs.query_fixed(mds_cols[1], Rotation::next()); // Dot product of the sbox outputs with the second MDS column. s_mds_1 * (sbox_out_0 * m_0 + sbox_out_1 * m_1 + sbox_out_2 * m_2 - mds_out_1) }); // Calculates the dot product of the sbox outputs with column `2` of the MDS matrix. Note // that `s_mds_2` is enabled in the third MDS row. cs.create_gate("s_mds_2", |cs| { let sbox_out_0 = cs.query_advice(sbox_out_col, Rotation(-2)); let sbox_out_1 = cs.query_advice(sbox_out_col, Rotation::prev()); let sbox_out_2 = cs.query_advice(sbox_out_col, Rotation::cur()); let mds_out_2 = cs.query_advice(mds_out_col, Rotation::cur()); let s_mds_2 = cs.query_selector(s_mds[2], Rotation::cur()); // The third MDS column. let m_0 = cs.query_fixed(mds_cols[2], Rotation(-2)); let m_1 = cs.query_fixed(mds_cols[2], Rotation::prev()); let m_2 = cs.query_fixed(mds_cols[2], Rotation::cur()); // Dot product of the sbox outputs with the third MDS column. s_mds_2 * (sbox_out_0 * m_0 + sbox_out_1 * m_1 + sbox_out_2 * m_2 - mds_out_2) }); // Copies a round's MDS output into the next round's state. let perm_output_to_input = Permutation::new(cs, &[mds_out_col.into(), a_col.into()]); // Copies a round's MDS output into the next round's sbox output. 
let perm_output_to_sbox_output = Permutation::new(cs, &[mds_out_col.into(), sbox_out_col.into()]); PoseidonChipConfig { a_col, sbox_out_col, mds_out_col, pre_key_col, post_key_col, mds_cols, s_sbox_pre_post, s_sbox_post, s_sbox_no_add, s_mds, perm_output_to_input, perm_output_to_sbox_output, } } fn alloc_full_round( &self, layouter: &mut impl Layouter<Fp>, // We need to pass in the `state` as `MaybeAlloc` to allow `alloc_full_round()` to be called // for the first round, i.e. the state values are unallocated prior to the first round. state: &[MaybeAlloc], round: usize, ) -> Result<Vec<MaybeAlloc>, Error> { dbg!(round); assert!(is_full_round(round)); assert_eq!(state.len(), WIDTH); let is_first_round = round == 0; let is_last_round = round == R - 1; let is_pre_sparse_round = round == R_f - 1; if is_first_round { for state_i in state { assert!(state_i.is_unallocated()); } } else { for state_i in state { assert!(state_i.is_allocated()); } } let post_keys = &*POST_KEYS[round]; assert_eq!(post_keys.len(), if is_last_round { 0 } else { WIDTH }); // Copy field elements out of `MaybeAlloc`'s for more concise arithmetic. let state_values: Vec<Fp> = state .iter() .map(|maybe_alloc| maybe_alloc.value()) .collect(); // Calculate the S-box output for each state element. let sbox_outputs = if is_first_round { sbox_pre_post(&state_values) } else if is_last_round { sbox_no_add(&state_values) } else { sbox_post(&state_values, &post_keys) }; // Calculate the MDS mixing output for each state element. let m = if is_pre_sparse_round { &*PRE_SPARSE } else { &*MDS }; let mds_outputs = vec_matrix_mul(&sbox_outputs, m); // Store the allocated outputs of MDS mixing. let mut mds_outputs_alloc: Vec<Option<MaybeAlloc>> = vec![None; WIDTH]; layouter.assign_region( || format!("alloc (full) round {}", round), |mut region| { // Allocate each state element's row in the constraint system. 
for row_offset in 0..WIDTH { dbg!(row_offset); // If this is the first round, we are allocating the state element for the first // time, otherwise we are reallocating an element output by the previous round. let a_cell = region.assign_advice( || format!("a_{} (round {})", row_offset, round), self.config.a_col, row_offset, || Ok(state_values[row_offset]), )?; if !is_first_round { region.constrain_equal( &self.config.perm_output_to_input, state[row_offset].cell(), a_cell, )?; } // If this is the first round allocate a pre-S-box key. if is_first_round { region.assign_fixed( || format!("pre_key_{} (round {})", row_offset, round), self.config.pre_key_col, row_offset, || Ok(PRE_KEYS[row_offset]), )?; } // If this is not the last round allocate a post-S-box key. if !is_last_round { region.assign_fixed( || format!("post_key_{} (round {})", row_offset, round), self.config.post_key_col, row_offset, || Ok(post_keys[row_offset]), )?; } // Allocate the S-box output. region.assign_advice( || format!("sbox_out_{} (round {})", row_offset, round), self.config.sbox_out_col, row_offset, || Ok(sbox_outputs[row_offset]), )?; // Allocate the MDS mixing output. let mds_output_cell = region.assign_advice( || format!("mds_out_{} (round {})", row_offset, round), self.config.mds_out_col, row_offset, || Ok(mds_outputs[row_offset]), )?; // Keep a reference to the allocated MDS output. mds_outputs_alloc[row_offset] = Some(MaybeAlloc::Allocated(Alloc { cell: mds_output_cell, value: mds_outputs[row_offset], })); // Enable the S-box and MDS mixing selectors. if is_first_round { self.config.s_sbox_pre_post.enable(&mut region, row_offset)?; } else if is_last_round { self.config.s_sbox_no_add.enable(&mut region, row_offset)?; } else { self.config.s_sbox_post.enable(&mut region, row_offset)?; }; self.config.s_mds[row_offset].enable(&mut region, row_offset)?; // Allocate this MDS matrix row. 
for col in 0..WIDTH { region.assign_fixed( || format!( "{} row={}, col={} (round {})", if is_pre_sparse_round { "P" } else { "MDS" }, row_offset, col, round, ), self.config.mds_cols[col], row_offset, || Ok(m[row_offset][col]), )?; } } Ok(()) }, )?; let mds_outputs_alloc: Vec<MaybeAlloc> = mds_outputs_alloc .into_iter() .map(|opt| opt.unwrap()) .collect(); Ok(mds_outputs_alloc) } fn alloc_partial_round( &self, layouter: &mut impl Layouter<Fp>, state: &[MaybeAlloc], round: usize, ) -> Result<Vec<MaybeAlloc>, Error> { dbg!(round); assert!(is_partial_round(round)); assert_eq!(state.len(), WIDTH); for state_i in state { assert!(state_i.is_allocated()); } assert_eq!(POST_KEYS[round].len(), 1); let post_key = POST_KEYS[round][0]; // Copy field elements out of `MaybeAlloc`'s for more concise arithmetic. let state_values: Vec<Fp> = state .iter() .map(|maybe_alloc| maybe_alloc.value()) .collect(); // Calculate the S-box output for the first state element. let mut sbox_outputs: Vec<Fp> = vec![pow5(state_values[0]) + post_key]; sbox_outputs.extend_from_slice(&state_values[1..]); // Calculate the MDS mixing output for each state element. let sparse_index = round - R_f; let m = &*SPARSE[sparse_index]; let mds_outputs = vec_matrix_mul(&sbox_outputs, m); // Store the allocated outputs of MDS mixing. let mut mds_outputs_alloc: Vec<Option<MaybeAlloc>> = vec![None; WIDTH]; layouter.assign_region( || format!("alloc (partial) round {}", round), |mut region| { // Allocate values that are exclusive to the first row. let row_offset = 0; // Reallocate the first state element which was output by the previous round. let a_cell = region.assign_advice( || format!("a_0 (round {})", round), self.config.a_col, row_offset, || Ok(state_values[0]), )?; region.constrain_equal( &self.config.perm_output_to_input, state[0].cell(), a_cell, )?; // Allocate the first state element's post-S-box key. 
region.assign_fixed( || format!("post_key_0 (round {})", round), self.config.post_key_col, row_offset, || Ok(post_key), )?; // Enable the first state element's S-box selector. self.config.s_sbox_post.enable(&mut region, row_offset)?; // Allocate the remaining round values. for row_offset in 0..WIDTH { // If this is the first row (`row_offset = 0`), allocate the first state // element's S-box output. If this is not the first row, reallocate the state // element output by the previous round. let sbox_out_cell = region.assign_advice( || format!("sbox_out_{} (round {})", row_offset, round), self.config.sbox_out_col, row_offset, || Ok(sbox_outputs[row_offset]), )?; if row_offset > 0 { region.constrain_equal( &self.config.perm_output_to_sbox_output, state[row_offset].cell(), sbox_out_cell, )?; } // Allocate the state element's MDS mixing output. let mds_out_cell = region.assign_advice( || format!("mds_out_{} (round {})", row_offset, round), self.config.mds_out_col, row_offset, || Ok(mds_outputs[row_offset]), )?; // Keep a reference to the allocated MDS output. mds_outputs_alloc[row_offset] = Some(MaybeAlloc::Allocated(Alloc { cell: mds_out_cell, value: mds_outputs[row_offset], })); // Enable the MDS mixing selector for this state element. self.config.s_mds[row_offset].enable(&mut region, row_offset)?; // Allocate this MDS matrix row. for col in 0..WIDTH { region.assign_fixed( || format!( "S{} row={}, col={} (round {})", sparse_index, row_offset, col, round, ), self.config.mds_cols[col], row_offset, || Ok(m[row_offset][col]), )?; } } Ok(()) }, )?; let mds_outputs_alloc: Vec<MaybeAlloc> = mds_outputs_alloc .into_iter() .map(|opt| opt.unwrap()) .collect(); Ok(mds_outputs_alloc) } } struct PoseidonCircuit { // Private inputs. initial_state: Vec<Fp>, // Public inputs. 
digest: Fp, } #[derive(Clone)] struct PoseidonCircuitConfig { poseidon_config: PoseidonChipConfig, digest_col: Column<Advice>, pub_col: Column<Instance>, s_pub: Selector, perm_digest: Permutation, } impl Circuit<Fp> for PoseidonCircuit { type Config = PoseidonCircuitConfig; fn configure(cs: &mut ConstraintSystem<Fp>) -> Self::Config { let digest_col = cs.advice_column(); let poseidon_config = PoseidonChip::configure(cs, digest_col); let pub_col = cs.instance_column(); let s_pub = cs.selector(); let perm_digest = Permutation::new(cs, &[digest_col.into(), pub_col.into()]); // let (poseidon_config, io_cols) = PoseidonChip::configure(cs); // let preimg_cols = io_cols[..]; // let digest_col = io_cols[1]; // let pub_col = cs.instance_column(); // let s_pub = cs.selector(); // let perm_digest = Permutation::new(cs, &[digest_col.into(), pub_col.into()]); cs.create_gate("public input", |cs| { let digest = cs.query_advice(digest_col, Rotation::cur()); let pi = cs.query_instance(pub_col, Rotation::cur()); let s_pub = cs.query_selector(s_pub, Rotation::cur()); s_pub * (digest - pi) }); PoseidonCircuitConfig { poseidon_config, digest_col, pub_col, s_pub, perm_digest, } // PoseidonCircuitConfig { // poseidon_config, // preimg_col, // digest_col, // pub_col, // s_pub, // perm_digest, // } } fn synthesize(&self, cs: &mut impl Assignment<Fp>, config: Self::Config) -> Result<(), Error> { let mut layouter = SingleChipLayouter::new(cs)?; let poseidon_chip = PoseidonChip::new(config.poseidon_config.clone()); let mut state_alloc: Vec<MaybeAlloc> = self.initial_state .iter() .map(|state_i| MaybeAlloc::Unallocated(state_i.clone())) .collect(); for round in 0..R_f { state_alloc = poseidon_chip.alloc_full_round(&mut layouter, &state_alloc, round)?; } for round in R_f..R_f + R_P { state_alloc = poseidon_chip.alloc_partial_round(&mut layouter, &state_alloc, round)?; } for round in R_f + R_P..R { state_alloc = poseidon_chip.alloc_full_round(&mut layouter, &state_alloc, round)?; } // The 
calculated digest is the second element of the output state vector. let digest_alloc = &state_alloc[1]; dbg!(digest_alloc.value()); layouter.assign_region( || "digest equality", |mut region| { let row_offset = 0; let digest_copy_cell = region.assign_advice( || "digest copy", config.digest_col, row_offset, || Ok(digest_alloc.value()), )?; region.constrain_equal(&config.perm_digest, digest_alloc.cell(), digest_copy_cell)?; config.s_pub.enable(&mut region, row_offset)?; Ok(()) }, ); Ok(()) } } fn poseidon(preimg: &[Fp]) -> Fp { let mut state = sbox_pre_post(&preimg); state = vec_matrix_mul(&state, &*MDS); for round in 1..R_f { state = sbox_post(&state, &*POST_KEYS[round]); let m = if round == R_f - 1 { &*PRE_SPARSE } else { &*MDS }; state = vec_matrix_mul(&state, m); } for round in R_f..R_f + R_P { state[0] = pow5(state[0].clone()) + POST_KEYS[round][0]; let sparse_index = round - R_f; state = vec_matrix_mul(&state, &*SPARSE[sparse_index]); } for round in R_f + R_P..R - 1 { state = sbox_post(&state, &*POST_KEYS[round]); state = vec_matrix_mul(&state, &*MDS); } state = sbox_no_add(&state); state = vec_matrix_mul(&state, &*MDS); state[1] } fn main() { // There are `WIDTH` number of rows per round; add one row for checking that the calculated // digest is equal to the public digest. const N_ROWS_USED: u32 = (R * WIDTH + 1) as u32; const PUB_INPUT_ROW_INDEX: usize = N_ROWS_USED as usize - 1; // The public digest. let pub_input = Fp::from_bytes(&[ 105u8, 223, 174, 214, 135, 10, 246, 134, 56, 44, 82, 200, 244, 29, 158, 165, 255, 6, 80, 24, 144, 74, 184, 235, 28, 196, 134, 44, 131, 236, 207, 13, ]).unwrap(); // Verifier's public inputs. let k = (N_ROWS_USED as f32).log2().ceil() as u32; let n_rows = 1 << k; let mut pub_inputs = vec![Fp::zero(); n_rows]; pub_inputs[PUB_INPUT_ROW_INDEX] = Fp::from(pub_input); let preimg = vec![Fp::from(55), Fp::from(101), Fp::from(237)]; dbg!(poseidon(&preimg)); // Prover's circuit contains public and private inputs. 
let circuit = PoseidonCircuit { initial_state: preimg, digest: pub_input, }; let prover = MockProver::run(k, &circuit, vec![pub_inputs.clone()]).unwrap(); dbg!(prover.verify()); // assert!(prover.verify().is_ok()); }
35.966292
100
0.516019
e6459861835b32040c5f49afdcb60639eed1a226
5,926
use std::{ cell::UnsafeCell, sync::atomic::{AtomicBool, Ordering}, hint::unreachable_unchecked, panic::{UnwindSafe, RefUnwindSafe}, fmt, }; use lock_api::RawMutex as _RawMutex; use parking_lot::RawMutex; pub(crate) struct OnceCell<T> { mutex: Mutex, is_initialized: AtomicBool, value: UnsafeCell<Option<T>>, } // Why do we need `T: Send`? // Thread A creates a `OnceCell` and shares it with // scoped thread B, which fills the cell, which is // then destroyed by A. That is, destructor observes // a sent value. unsafe impl<T: Sync + Send> Sync for OnceCell<T> {} unsafe impl<T: Send> Send for OnceCell<T> {} impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceCell<T> {} impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {} impl<T: fmt::Debug> fmt::Debug for OnceCell<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OnceCell").field("value", &self.get()).finish() } } impl<T> OnceCell<T> { pub(crate) const fn new() -> OnceCell<T> { OnceCell { mutex: Mutex::new(), is_initialized: AtomicBool::new(false), value: UnsafeCell::new(None), } } pub(crate) fn get(&self) -> Option<&T> { if self.is_initialized.load(Ordering::Acquire) { // This is safe: if we've read `true` with `Acquire`, that means // we've are paired with `Release` store, which sets the value. // Additionally, no one invalidates value after `is_initialized` is // set to `true` let value: &Option<T> = unsafe { &*self.value.get() }; value.as_ref() } else { None } } pub(crate) fn set(&self, value: T) -> Result<(), T> { let mut value = Some(value); { // We *could* optimistically check here if cell is initialized, but // we don't do that, assuming that `set` actually sets the value // most of the time. let _guard = self.mutex.lock(); // Relaxed loads are OK under the mutex, because it's mutex // unlock/lock that establishes "happens before". 
if !self.is_initialized.load(Ordering::Relaxed) { // Uniqueness of reference is guaranteed my mutex and flag let slot: &mut Option<T> = unsafe { &mut *self.value.get() }; debug_assert!(slot.is_none()); *slot = value.take(); // This `Release` guarantees that `get` sees only fully stored // value self.is_initialized.store(true, Ordering::Release); } } match value { None => Ok(()), Some(value) => Err(value), } } pub(crate) fn get_or_init<F: FnOnce() -> T>(&self, f: F) -> &T { enum Void {} match self.get_or_try_init(|| Ok::<T, Void>(f())) { Ok(val) => val, Err(void) => match void {}, } } pub(crate) fn get_or_try_init<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> { // Standard double-checked locking pattern. // Optimistically check if value is initialized, without locking a // mutex. if !self.is_initialized.load(Ordering::Acquire) { let _guard = self.mutex.lock(); // Relaxed is OK, because mutex unlock/lock establishes "happens // before". if !self.is_initialized.load(Ordering::Relaxed) { // We are calling user-supplied function and need to be careful. // - if it returns Err, we unlock mutex and return without touching anything // - if it panics, we unlock mutex and propagate panic without touching anything // - if it calls `set` or `get_or_try_init` re-entrantly, we get a deadlock on // mutex, which is important for safety. We *could* detect this and panic, // but that is more complicated // - finally, if it returns Ok, we store the value and store the flag with // `Release`, which synchronizes with `Acquire`s. let value = f()?; let slot: &mut Option<T> = unsafe { &mut *self.value.get() }; debug_assert!(slot.is_none()); *slot = Some(value); self.is_initialized.store(true, Ordering::Release); } } // Value is initialized here, because we've read `true` from // `is_initialized`, and have a "happens before" due to either // Acquire/Release pair (fast path) or mutex unlock (slow path). // While we could have just called `get`, that would be twice // as slow! 
let value: &Option<T> = unsafe { &*self.value.get() }; return match value.as_ref() { Some(it) => Ok(it), None => { debug_assert!(false); unsafe { unreachable_unchecked() } } }; } pub(crate) fn into_inner(self) -> Option<T> { // Because `into_inner` takes `self` by value, the compiler statically verifies // that it is not currently borrowed. So it is safe to move out `Option<T>`. self.value.into_inner() } } /// Wrapper around parking_lot's `RawMutex` which has `const fn` new. struct Mutex { inner: RawMutex, } impl Mutex { const fn new() -> Mutex { Mutex { inner: RawMutex::INIT } } fn lock(&self) -> MutexGuard<'_> { self.inner.lock(); MutexGuard { inner: &self.inner } } } struct MutexGuard<'a> { inner: &'a RawMutex, } impl Drop for MutexGuard<'_> { fn drop(&mut self) { self.inner.unlock(); } } #[test] #[cfg(pointer_width = "64")] fn test_size() { use std::mem::size_of; assert_eq!(size_of::<OnceCell<u32>>, 2 * size_of::<u32>); }
34.654971
97
0.561087
fc0ce48c9a9639d833b510384495f1d51d2a1aac
5,142
//! Initialize variables at runtime which then behave like static variables.
//!
//! ```rust
//! extern crate late_static;
//! use late_static::LateStatic;
//!
//! struct Foo {
//!     pub value: u32,
//! }
//!
//! static FOO: LateStatic<Foo> = LateStatic::new();
//!
//! fn main() {
//!     unsafe {
//!         LateStatic::assign(&FOO, Foo { value: 42 });
//!     }
//!     println!("{}", FOO.value);
//! }
//! ```
#![cfg_attr(not(test), no_std)]

use core::cell::UnsafeCell;

/// A static value that receives its contents at runtime instead of at
/// compile time. Dereferencing it before `assign` (or after `clear`) panics.
pub struct LateStatic<T> {
    slot: UnsafeCell<Option<T>>,
}

// Sending/sharing a `LateStatic<T>` is exactly as safe as sending/sharing
// the wrapped `T`; freedom from data races is the caller's obligation via
// the `unsafe` mutation API below.
unsafe impl<T: Send> core::marker::Send for LateStatic<T> {}
unsafe impl<T: Sync> core::marker::Sync for LateStatic<T> {}

impl<T> LateStatic<T> {
    /// Construct an empty `LateStatic`; usable in `static` initializers.
    pub const fn new() -> Self {
        LateStatic {
            slot: UnsafeCell::new(None),
        }
    }

    /// Assign a value to the late static.
    ///
    /// This only works once. A second call to assign for a given variable will panic.
    ///
    /// # Safety
    ///
    /// This is completely unsafe if there is even the slightest chance of another
    /// thread trying to dereference the variable.
    pub unsafe fn assign(instance: &LateStatic<T>, val: T) {
        let slot = &mut *instance.slot.get();
        // Panics (leaving the old value intact) on a second assignment.
        assert!(slot.is_none(), "Second assignment to late static");
        *slot = Some(val);
    }

    /// Invalidate the late static by removing its inner value.
    ///
    /// # Safety
    ///
    /// This is completely unsafe if there is even the slightest chance of another
    /// thread trying to dereference the variable.
    pub unsafe fn clear(instance: &LateStatic<T>) {
        let slot = &mut *instance.slot.get();
        // `take` empties the slot; if it was already empty, panic.
        assert!(
            slot.take().is_some(),
            "Tried to clear a late static without a value"
        );
    }

    /// Whether a value is assigned to this LateStatic.
    ///
    /// # Safety
    ///
    /// This is completely unsafe if there is even the slightest chance of another
    /// thread trying to dereference the variable.
    pub unsafe fn has_value(instance: &LateStatic<T>) -> bool {
        (*instance.slot.get()).is_some()
    }
}

impl<T> core::ops::Deref for LateStatic<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe {
            (*self.slot.get())
                .as_ref()
                .expect("Dereference of late static before a value was assigned")
        }
    }
}

impl<T> core::ops::DerefMut for LateStatic<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe {
            (*self.slot.get())
                .as_mut()
                .expect("Dereference of late static before a value was assigned")
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    static ASSIGN_ONCE_TEST: LateStatic<u32> = LateStatic::new();

    #[test]
    fn assign_once() {
        unsafe {
            assert!(!LateStatic::has_value(&ASSIGN_ONCE_TEST));
            LateStatic::assign(&ASSIGN_ONCE_TEST, 42);
            assert!(LateStatic::has_value(&ASSIGN_ONCE_TEST));
        }
    }

    static ASSIGN_TWICE_TEST: LateStatic<u32> = LateStatic::new();

    #[test]
    #[should_panic]
    fn assign_twice() {
        unsafe {
            LateStatic::assign(&ASSIGN_TWICE_TEST, 42);
            LateStatic::assign(&ASSIGN_TWICE_TEST, 37);
        }
    }

    struct Foo {
        pub value: u32,
    }

    static DEREF_CONST_TEST: LateStatic<Foo> = LateStatic::new();

    #[test]
    fn deref_const() {
        unsafe {
            LateStatic::assign(&DEREF_CONST_TEST, Foo { value: 42 });
        }
        assert_eq!(DEREF_CONST_TEST.value, 42);
    }

    static mut DEREF_MUT_TEST: LateStatic<Foo> = LateStatic::new();

    #[test]
    fn deref_mut() {
        unsafe {
            LateStatic::assign(&DEREF_MUT_TEST, Foo { value: 42 });
            assert_eq!(DEREF_MUT_TEST.value, 42);
            DEREF_MUT_TEST.value = 37;
            assert_eq!(DEREF_MUT_TEST.value, 37);
        }
    }

    static mut DEREF_WITHOUT_VALUE: LateStatic<Foo> = LateStatic::new();

    #[test]
    #[should_panic]
    fn deref_without_value() {
        unsafe {
            #[allow(clippy::no_effect)]
            DEREF_WITHOUT_VALUE.value;
        }
    }

    static mut CLEAR_TEST: LateStatic<Foo> = LateStatic::new();

    #[test]
    fn clear() {
        unsafe {
            LateStatic::assign(&CLEAR_TEST, Foo { value: 42 });
            assert_eq!(CLEAR_TEST.value, 42);
            LateStatic::clear(&CLEAR_TEST);
            assert!(!LateStatic::has_value(&CLEAR_TEST));
        }
    }

    static mut CLEAR_WITHOUT_VALUE: LateStatic<Foo> = LateStatic::new();

    #[test]
    #[should_panic]
    fn clear_without_value() {
        unsafe {
            LateStatic::clear(&CLEAR_WITHOUT_VALUE);
        }
    }
}
27.497326
89
0.563011
6787db5519de2d8d651753fd158a65a23b96d65b
26,555
use crate::bridged_type::{pat_type_pat_is_self, BridgedType}; use crate::errors::{FunctionAttributeParseError, IdentifiableParseError, ParseError, ParseErrors}; use crate::parse::parse_extern_mod::function_attributes::FunctionAttributes; use crate::parse::parse_extern_mod::generic_opaque_type::GenericOpaqueType; use crate::parse::parse_extern_mod::opaque_type_attributes::{ OpaqueTypeAttr, OpaqueTypeAttributes, }; use crate::parse::type_declarations::{ OpaqueForeignTypeDeclaration, TypeDeclaration, TypeDeclarations, }; use crate::parse::HostLang; use crate::ParsedExternFn; use quote::ToTokens; use std::cmp::Ordering; use std::collections::HashMap; use std::ops::Deref; use syn::{FnArg, ForeignItem, ForeignItemFn, ItemForeignMod, Meta, Pat, ReturnType, Type}; mod function_attributes; mod generic_opaque_type; mod opaque_type_attributes; pub(super) struct ForeignModParser<'a> { pub errors: &'a mut ParseErrors, /// All of the type declarations across all of the extern "..." foreign modules in the /// `mod` module that this foreign module is in. 
pub type_declarations: &'a mut TypeDeclarations, pub functions: &'a mut Vec<ParsedExternFn>, pub unresolved_types: &'a mut Vec<Type>, } impl<'a> ForeignModParser<'a> { pub fn parse(mut self, mut foreign_mod: ItemForeignMod) -> Result<(), syn::Error> { if foreign_mod.abi.name.is_none() { self.errors.push(ParseError::AbiNameMissing { extern_token: foreign_mod.abi.extern_token, }); return Ok(()); } let abi_name = foreign_mod.abi.name.unwrap(); let host_lang = match abi_name.value().as_str() { "Rust" => HostLang::Rust, "Swift" => HostLang::Swift, _ => { self.errors.push(ParseError::AbiNameInvalid { abi_name }); return Ok(()); } }; foreign_mod.items.sort_by(|a, _b| { if matches!(a, ForeignItem::Type(_)) { Ordering::Less } else { Ordering::Greater } }); let mut local_type_declarations = HashMap::new(); for foreign_mod_item in foreign_mod.items { match foreign_mod_item { ForeignItem::Type(foreign_ty) => { // TODO: Normalize with the code used to parse generic foreign item types let ty_name = foreign_ty.ident.to_string(); if let Some(_builtin) = BridgedType::new_with_str( &foreign_ty.ident.to_string(), &self.type_declarations, ) { self.errors.push(ParseError::DeclaredBuiltInType { ty: foreign_ty.clone(), }); } let mut attributes = OpaqueTypeAttributes::default(); let mut doc_comment = None; for attr in foreign_ty.attrs.iter() { let attribute_name = attr.path.to_token_stream().to_string(); match attribute_name.as_str() { "doc" => { let meta = attr.parse_meta()?; let doc = match meta { Meta::NameValue(name_val) => match name_val.lit { syn::Lit::Str(comment) => comment.value(), _ => { todo!("Push parse error that doc attribute is in incorrect format") } }, _ => { todo!("Push parse error that doc attribute is in incorrect format") } }; doc_comment = Some(doc); } "swift_bridge" => { let attr: OpaqueTypeAttr = attr.parse_args()?; attributes.store_attrib(attr); } _ => todo!("Push unsupported attribute error."), }; } let foreign_type = OpaqueForeignTypeDeclaration { ty: 
foreign_ty.ident.clone(), host_lang, already_declared: attributes.already_declared, doc_comment, generics: vec![], }; self.type_declarations.insert( ty_name.clone(), TypeDeclaration::Opaque(foreign_type.clone()), ); local_type_declarations.insert(ty_name, foreign_type); } ForeignItem::Fn(func) => { let mut attributes = FunctionAttributes::default(); for attr in func.attrs.iter() { attributes = attr.parse_args()?; } for arg in func.sig.inputs.iter() { if let FnArg::Typed(pat_ty) = arg { let ty = &pat_ty.ty; if BridgedType::new_with_type(&ty, &self.type_declarations).is_none() { self.unresolved_types.push(ty.deref().clone()); } } } let return_type = &func.sig.output; if let ReturnType::Type(_, return_ty) = return_type { if BridgedType::new_with_type(return_ty.deref(), &self.type_declarations) .is_none() { self.unresolved_types.push(return_ty.deref().clone()); } } let first_input = func.sig.inputs.iter().next(); let associated_type = self.get_associated_type( first_input, func.clone(), &attributes, &mut local_type_declarations, )?; if attributes.is_swift_identifiable { let args = &func.sig.inputs; let mut is_ref_self_no_args = args.len() == 1; if is_ref_self_no_args { is_ref_self_no_args = match args.iter().next().unwrap() { FnArg::Receiver(receiver) => { receiver.reference.is_some() && receiver.mutability.is_none() } FnArg::Typed(pat_ty) => { pat_type_pat_is_self(pat_ty) && pat_ty.ty.to_token_stream().to_string().starts_with("&") } }; } let has_return_type = matches!(&func.sig.output, ReturnType::Type(_, _)); if !is_ref_self_no_args { self.errors.push(ParseError::FunctionAttribute( FunctionAttributeParseError::Identifiable( IdentifiableParseError::MustBeRefSelf { fn_ident: func.sig.ident.clone(), }, ), )); } if !has_return_type { self.errors.push(ParseError::FunctionAttribute( FunctionAttributeParseError::Identifiable( IdentifiableParseError::MissingReturnType { fn_ident: func.sig.ident.clone(), }, ), )); } } self.functions.push(ParsedExternFn { func, 
associated_type, is_swift_initializer: attributes.is_swift_initializer, is_swift_identifiable: attributes.is_swift_identifiable, host_lang, rust_name_override: attributes.rust_name, swift_name_override: attributes.swift_name, into_return_type: attributes.into_return_type, return_with: attributes.return_with, args_into: attributes.args_into, }); } ForeignItem::Verbatim(foreign_item_verbatim) => { if let Ok(generic_foreign_type) = syn::parse2::<GenericOpaqueType>(foreign_item_verbatim) { // TODO: Normalize with the code used to parse non-generic foreign item // types let ty_name = generic_foreign_type.ident.to_string(); let foreign_ty = OpaqueForeignTypeDeclaration { ty: generic_foreign_type.ident, host_lang, already_declared: false, doc_comment: None, generics: generic_foreign_type .generics .params .clone() .into_iter() .collect(), }; self.type_declarations .insert(ty_name.clone(), TypeDeclaration::Opaque(foreign_ty.clone())); local_type_declarations.insert(ty_name, foreign_ty); } } _ => {} } } Ok(()) } fn get_associated_type( &mut self, first: Option<&FnArg>, func: ForeignItemFn, attributes: &FunctionAttributes, local_type_declarations: &mut HashMap<String, OpaqueForeignTypeDeclaration>, ) -> syn::Result<Option<TypeDeclaration>> { let associated_type = match first { Some(FnArg::Receiver(recv)) => { if local_type_declarations.len() == 1 { let ty = local_type_declarations.iter_mut().next().unwrap().1; let associated_type = Some(TypeDeclaration::Opaque(ty.clone())); associated_type } else { self.errors.push(ParseError::AmbiguousSelf { self_: recv.clone(), }); return Ok(None); } } Some(FnArg::Typed(arg)) => match arg.pat.deref() { Pat::Ident(pat_ident) => { if pat_ident.ident.to_string() == "self" { let self_ty = match arg.ty.deref() { Type::Path(ty_path) => ty_path.path.segments.to_token_stream(), Type::Reference(type_ref) => type_ref.elem.deref().to_token_stream(), _ => { todo!("Add a test that hits this branch") } }; let self_ty_string = self_ty.to_string(); let ty 
= self.type_declarations.get(&self_ty_string).unwrap(); let associated_type = Some(ty.clone()); associated_type } else { let associated_type = self.get_associated_type( None, func.clone(), attributes, local_type_declarations, )?; associated_type } } _ => { todo!( r#" One way to hit this block is with a `fn foo (&self: SomeType)`. Note that this is an invalid signature since the `&` should be in front of `SomeType`, not `self`. i.e., this would be correct: `fn foo (self: &SomeType)` We should add a test case like this that hits this block, and verify that we push a parse error indicating that the function signature is invalid. For common mistakes such as the `&self: SomeType` example, we can have dedicated errors telling you exactly how to fix it. Otherwise we use a more general error that says that your argument is invalid. "# ) } }, None => { let associated_type = if let Some(associated_to) = &attributes.associated_to { let ty = self .type_declarations .get(&associated_to.to_string()) .unwrap(); Some(ty.clone()) } else if attributes.is_swift_initializer { let ty_string = match &func.sig.output { ReturnType::Default => { todo!("Push error if initializer does not return a type") } ReturnType::Type(_, ty) => ty.deref().to_token_stream().to_string(), }; let ty = self.type_declarations.get(&ty_string); ty.map(|ty| ty.clone()) } else { None }; associated_type } }; Ok(associated_type) } } #[cfg(test)] mod tests { use crate::errors::ParseError; use crate::test_utils::{parse_errors, parse_ok}; use crate::SwiftBridgeModule; use quote::{quote, ToTokens}; use syn::parse_quote; /// Verify that we can parse a SwiftBridgeModule from an empty module. #[test] fn parse_empty_module() { let tokens = quote! { mod foo { } }; let module: SwiftBridgeModule = parse_quote!(#tokens); assert_eq!(module.name.to_string(), "foo"); } /// Verify that we store an error if no abi name was provided. #[test] fn error_if_no_abi_name_provided_for_an_extern_block() { let tokens = quote! 
{ mod foo { extern {} } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 1); match errors[0] { ParseError::AbiNameMissing { .. } => {} _ => panic!(), } } /// Verify that we store an error if the abi name isn't Rust or Swift. #[test] fn error_if_invalid_abi_name() { let tokens = quote! { mod foo { extern "SomeAbi" {} } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 1); match &errors[0] { ParseError::AbiNameInvalid { abi_name } => { assert_eq!(abi_name.value(), "SomeAbi"); } _ => panic!(), } } /// Verify that we can parse a Rust type declaration. #[test] fn rust_type_declaration() { let tokens = quote! { mod foo { extern "Rust" { type Foo; } } }; let module = parse_ok(tokens); assert_eq!(module.types.types()[0].unwrap_opaque().to_string(), "Foo"); } /// Verify that we return an error if the declared type is a built in type. #[test] fn error_if_declared_built_in_type() { let tokens = quote! { mod foo { extern "Rust" { type u8; } } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 1); } /// Verify that we can parse a Rust type's methods. /// We test all of the possible ways we can specify self. #[test] fn parses_rust_self_methods() { let tests = vec![ quote! { fn bar (self); }, quote! { fn bar (&self); }, quote! { fn bar (&mut self); }, quote! { fn bar (self: Foo); }, quote! { fn bar (self: &Foo); }, quote! { fn bar (self: &mut Foo); }, ]; for fn_definition in tests { let tokens = quote! { mod foo { extern "Rust" { type Foo; #fn_definition } } }; let module = parse_ok(tokens); let ty = &module.types.types()[0].unwrap_opaque(); assert_eq!(ty.to_string(), "Foo"); assert_eq!( module.functions.len(), 1, "Failed not parse {} into an associated method.", quote! {#fn_definition}.to_string() ); } } /// Verify that if a a type is defined in another foreign module block we can still use it. #[test] fn type_defined_in_another_foreign_module() { let tokens = quote! 
{ mod foo { extern "Rust" { fn foo () -> AnotherType; } extern "Rust" { type AnotherType; } } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 0,); } /// Verify that we can parse a freestanding Rust function declaration. #[test] fn rust_freestanding_function_no_args() { let tokens = quote! { mod foo { extern "Rust" { fn bar () -> u8; } } }; let module = parse_ok(tokens); assert_eq!(module.functions.len(), 1); } /// Verify that we can parse a freestanding Rust function declaration that has one arg. #[test] fn rust_freestanding_function_one_arg() { let tokens = quote! { mod foo { extern "Rust" { fn bar (bazz: u32); } } }; let module = parse_ok(tokens); assert_eq!(module.functions.len(), 1); } /// Verify that if a freestanding function has argument types that were not declared in the /// module we return an error. #[test] fn freestanding_function_argument_undeclared_type() { let tokens = quote! { mod foo { extern "Rust" { type Foo; fn a (bar: Bar); fn b (bar: &Bar); fn c (bar: &mut Bar); // Counts as two errors. fn d (multiple: Bar, args: Bar); } } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 5); for error in errors.iter() { match error { ParseError::UndeclaredType { ty } => { let ty_name = ty.to_token_stream().to_string(); // "& Bar" -> "Bar" let ty_name = ty_name.split_whitespace().last().unwrap(); assert_eq!(ty_name, "Bar"); } _ => panic!(), } } } /// Verify that a freestanding function can return a declared type. #[test] fn freestanding_function_return_declared_type() { let tokens = quote! { #[swift_bridge::bridge] mod foo { extern "Rust" { type Bar; fn a () -> Bar; } } }; let module = parse_ok(tokens); assert_eq!(module.functions.len(), 1); } /// Verify that if a freestanding function returns a type that was not declared in the module /// we return an error. #[test] fn freestanding_function_returns_undeclared_type() { let tokens = quote! 
{ mod foo { extern "Rust" { type Foo; fn a () -> Bar; fn a () -> &Bar; fn a () -> &mut Bar; } } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 3); for error in errors.iter() { match error { ParseError::UndeclaredType { ty } => { let ty_name = ty.to_token_stream().to_string(); // "& Bar" -> "Bar" let ty_name = ty_name.split_whitespace().last().unwrap(); assert_eq!(ty_name, "Bar"); } _ => panic!(), } } } /// Verify that if an extern Rust block has more than one type, we push errors for any methods /// that have an ambiguous self. #[test] fn error_if_method_has_ambiguous_self() { let tokens = quote! { mod foo { extern "Rust" { type SomeType; type AnotherType; fn a (self); fn b (&self); fn c (&mut self); } } }; let errors = parse_errors(tokens); assert_eq!(errors.len(), 3); for idx in 0..3 { match &errors[idx] { ParseError::AmbiguousSelf { self_: _ } => {} _ => panic!(), }; } } /// Verify that annotated self methods get parsed. #[test] fn disambiguate_method() { let tokens = quote! { mod foo { extern "Rust" { type SomeType; type AnotherType; fn a (self: SomeType); fn b (self: &SomeType); fn c (self: &mut AnotherType); } } }; let module = parse_ok(tokens); let functions = &module.functions; for (ty_name, expected_count) in vec![("SomeType", 2), ("AnotherType", 1)] { assert_eq!( functions .iter() .filter(|f| f.associated_type.as_ref().unwrap().unwrap_opaque().ty == ty_name) .count(), expected_count ); } } /// Verify that if we have multiple externs types can be inferred within each. #[test] fn infer_type_with_multiple_externs() { let tokens = quote! 
{ mod foo { extern "Rust" { type SomeType; fn a (&self); } extern "Rust" { type AnotherType; fn b (&self); } } }; let module = parse_ok(tokens); let functions = &module.functions; for (ty_name, expected_count) in vec![("SomeType", 1), ("AnotherType", 1)] { assert_eq!( functions .iter() .filter(|f| f.associated_type.as_ref().unwrap().unwrap_opaque().ty == ty_name) .count(), expected_count ); } } /// Verify that we we do not get any parsing errors when we use a type that is declared in /// an extern block that comes after the block that it is used in. #[test] fn type_declared_in_separate_extern_block_after_use() { let tokens = quote! { mod foo { extern "Rust" { fn a () -> AnotherType; fn b () -> Vec<AnotherType>; // TODO: (Dec 2021) Uncomment this when we support Option<OpaqueRustType> // fn c () -> Option<AnotherType>; fn d (arg: AnotherType); } extern "Rust" { type AnotherType; } } }; assert_eq!(parse_errors(tokens).len(), 0,); } /// Verify that we can parse the `already_declared` attribute. #[test] fn parse_already_declared_attribute() { let tokens = quote! { mod foo { extern "Rust" { #[swift_bridge(already_declared)] type AnotherType; } } }; let module = parse_ok(tokens); assert!( module .types .get("AnotherType") .unwrap() .unwrap_opaque() .already_declared ); } /// Verify that we can parse a doc comment from an extern "Rust" opaque type. #[test] fn parse_opaque_rust_type_doc_comment() { let tokens = quote! { mod foo { extern "Rust" { /// Some comment type AnotherType; } } }; let module = parse_ok(tokens); assert_eq!( module .types .get("AnotherType") .unwrap() .unwrap_opaque() .doc_comment .as_ref() .unwrap(), " Some comment" ); } /// Verify that we can parse generic extern "Rust" types #[test] fn parse_generic_extern_rust_type() { let tokens = quote! { #[swift_bridge:bridge] mod foo { extern "Rust" { type SomeType<A, B>; } } }; let module = parse_ok(tokens); assert_eq!( module .types .get("SomeType") .unwrap() .unwrap_opaque() .generics .len(), 2 ); } }
34.044872
111
0.445829
4be8e78f2b414cfc2681ee99c4720290dc99d9d7
1,669
use async_trait::*; use ic_base_types::{NodeId, RegistryVersion}; use ic_interfaces_transport::{ AsyncTransportEventHandler, FlowId, FlowTag, SendError, Transport, TransportErrorCode, TransportPayload, TransportStateChange, }; use ic_protobuf::registry::node::v1::NodeRecord; use mockall::*; use std::sync::Arc; mock! { pub Transport {} trait Transport { fn register_client( &self, event_handler: Arc<dyn AsyncTransportEventHandler>, ) -> Result<(), TransportErrorCode>; fn start_connections( &self, peer: &NodeId, record: &NodeRecord, registry_version: RegistryVersion, ) -> Result<(), TransportErrorCode>; fn stop_connections( &self, peer: &NodeId, ) -> Result<(), TransportErrorCode>; fn send( &self, peer: &NodeId, flow: FlowTag, message: TransportPayload, ) -> Result<(), TransportErrorCode>; fn clear_send_queues( &self, peer: &NodeId, ); fn clear_send_queue( &self, peer: &NodeId, flow_tag: FlowTag, ); } } mock! { pub TranportEventHandler {} } #[async_trait] impl AsyncTransportEventHandler for MockTranportEventHandler { async fn send_message( &self, _flow: FlowId, _message: TransportPayload, ) -> Result<(), SendError> { Ok(()) } async fn state_changed(&self, _state_change: TransportStateChange) {} async fn error(&self, _flow_id: FlowId, _error: TransportErrorCode) {} }
24.544118
90
0.579988
d965f768df3133c157bc9af53fb27baa9ac4e998
44,604
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. // Some deserializer fields are only used on Unix and Windows build fails without it use super::io::StdFileResource; use crate::fs_util::canonicalize_path; use crate::permissions::Permissions; use deno_core::error::bad_resource_id; use deno_core::error::custom_error; use deno_core::error::type_error; use deno_core::error::AnyError; use deno_core::OpState; use deno_core::RcRef; use deno_core::ResourceId; use deno_core::ZeroCopyBuf; use deno_crypto::rand::thread_rng; use deno_crypto::rand::Rng; use log::debug; use serde::Deserialize; use serde::Serialize; use std::cell::RefCell; use std::convert::From; use std::env::{current_dir, set_current_dir, temp_dir}; use std::io; use std::io::{Seek, SeekFrom}; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::time::SystemTime; use std::time::UNIX_EPOCH; use tokio::io::AsyncSeekExt; #[cfg(not(unix))] use deno_core::error::generic_error; #[cfg(not(unix))] use deno_core::error::not_supported; pub fn init(rt: &mut deno_core::JsRuntime) { super::reg_json_sync(rt, "op_open_sync", op_open_sync); super::reg_json_async(rt, "op_open_async", op_open_async); super::reg_json_sync(rt, "op_seek_sync", op_seek_sync); super::reg_json_async(rt, "op_seek_async", op_seek_async); super::reg_json_sync(rt, "op_fdatasync_sync", op_fdatasync_sync); super::reg_json_async(rt, "op_fdatasync_async", op_fdatasync_async); super::reg_json_sync(rt, "op_fsync_sync", op_fsync_sync); super::reg_json_async(rt, "op_fsync_async", op_fsync_async); super::reg_json_sync(rt, "op_fstat_sync", op_fstat_sync); super::reg_json_async(rt, "op_fstat_async", op_fstat_async); super::reg_json_sync(rt, "op_umask", op_umask); super::reg_json_sync(rt, "op_chdir", op_chdir); super::reg_json_sync(rt, "op_mkdir_sync", op_mkdir_sync); super::reg_json_async(rt, "op_mkdir_async", op_mkdir_async); super::reg_json_sync(rt, "op_chmod_sync", op_chmod_sync); super::reg_json_async(rt, "op_chmod_async", 
op_chmod_async); super::reg_json_sync(rt, "op_chown_sync", op_chown_sync); super::reg_json_async(rt, "op_chown_async", op_chown_async); super::reg_json_sync(rt, "op_remove_sync", op_remove_sync); super::reg_json_async(rt, "op_remove_async", op_remove_async); super::reg_json_sync(rt, "op_copy_file_sync", op_copy_file_sync); super::reg_json_async(rt, "op_copy_file_async", op_copy_file_async); super::reg_json_sync(rt, "op_stat_sync", op_stat_sync); super::reg_json_async(rt, "op_stat_async", op_stat_async); super::reg_json_sync(rt, "op_realpath_sync", op_realpath_sync); super::reg_json_async(rt, "op_realpath_async", op_realpath_async); super::reg_json_sync(rt, "op_read_dir_sync", op_read_dir_sync); super::reg_json_async(rt, "op_read_dir_async", op_read_dir_async); super::reg_json_sync(rt, "op_rename_sync", op_rename_sync); super::reg_json_async(rt, "op_rename_async", op_rename_async); super::reg_json_sync(rt, "op_link_sync", op_link_sync); super::reg_json_async(rt, "op_link_async", op_link_async); super::reg_json_sync(rt, "op_symlink_sync", op_symlink_sync); super::reg_json_async(rt, "op_symlink_async", op_symlink_async); super::reg_json_sync(rt, "op_read_link_sync", op_read_link_sync); super::reg_json_async(rt, "op_read_link_async", op_read_link_async); super::reg_json_sync(rt, "op_ftruncate_sync", op_ftruncate_sync); super::reg_json_async(rt, "op_ftruncate_async", op_ftruncate_async); super::reg_json_sync(rt, "op_truncate_sync", op_truncate_sync); super::reg_json_async(rt, "op_truncate_async", op_truncate_async); super::reg_json_sync(rt, "op_make_temp_dir_sync", op_make_temp_dir_sync); super::reg_json_async(rt, "op_make_temp_dir_async", op_make_temp_dir_async); super::reg_json_sync(rt, "op_make_temp_file_sync", op_make_temp_file_sync); super::reg_json_async(rt, "op_make_temp_file_async", op_make_temp_file_async); super::reg_json_sync(rt, "op_cwd", op_cwd); super::reg_json_sync(rt, "op_futime_sync", op_futime_sync); super::reg_json_async(rt, "op_futime_async", 
op_futime_async); super::reg_json_sync(rt, "op_utime_sync", op_utime_sync); super::reg_json_async(rt, "op_utime_async", op_utime_async); } fn into_string(s: std::ffi::OsString) -> Result<String, AnyError> { s.into_string().map_err(|s| { let message = format!("File name or path {:?} is not valid UTF-8", s); custom_error("InvalidData", message) }) } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct OpenArgs { path: String, mode: Option<u32>, options: OpenOptions, } #[derive(Deserialize, Default, Debug)] #[serde(rename_all = "camelCase")] #[serde(default)] pub struct OpenOptions { read: bool, write: bool, create: bool, truncate: bool, append: bool, create_new: bool, } fn open_helper( state: &mut OpState, args: OpenArgs, ) -> Result<(PathBuf, std::fs::OpenOptions), AnyError> { let path = Path::new(&args.path).to_path_buf(); let mut open_options = std::fs::OpenOptions::new(); if let Some(mode) = args.mode { // mode only used if creating the file on Unix // if not specified, defaults to 0o666 #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; open_options.mode(mode & 0o777); } #[cfg(not(unix))] let _ = mode; // avoid unused warning } let permissions = state.borrow::<Permissions>(); let options = args.options; if options.read { permissions.read.check(&path)?; } if options.write || options.append { permissions.write.check(&path)?; } open_options .read(options.read) .create(options.create) .write(options.write) .truncate(options.truncate) .append(options.append) .create_new(options.create_new); Ok((path, open_options)) } fn op_open_sync( state: &mut OpState, args: OpenArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<ResourceId, AnyError> { let (path, open_options) = open_helper(state, args)?; let std_file = open_options.open(path)?; let tokio_file = tokio::fs::File::from_std(std_file); let resource = StdFileResource::fs_file(tokio_file); let rid = state.resource_table.add(resource); Ok(rid) } async fn op_open_async( state: Rc<RefCell<OpState>>, args: 
OpenArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<ResourceId, AnyError> { let (path, open_options) = open_helper(&mut state.borrow_mut(), args)?; let tokio_file = tokio::fs::OpenOptions::from(open_options) .open(path) .await?; let resource = StdFileResource::fs_file(tokio_file); let rid = state.borrow_mut().resource_table.add(resource); Ok(rid) } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct SeekArgs { rid: ResourceId, offset: i64, whence: i32, } fn seek_helper(args: SeekArgs) -> Result<(u32, SeekFrom), AnyError> { let rid = args.rid; let offset = args.offset; let whence = args.whence as u32; // Translate seek mode to Rust repr. let seek_from = match whence { 0 => SeekFrom::Start(offset as u64), 1 => SeekFrom::Current(offset), 2 => SeekFrom::End(offset), _ => { return Err(type_error(format!("Invalid seek mode: {}", whence))); } }; Ok((rid, seek_from)) } fn op_seek_sync( state: &mut OpState, args: SeekArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<u64, AnyError> { let (rid, seek_from) = seek_helper(args)?; let pos = StdFileResource::with(state, rid, |r| match r { Ok(std_file) => std_file.seek(seek_from).map_err(AnyError::from), Err(_) => Err(type_error( "cannot seek on this type of resource".to_string(), )), })?; Ok(pos) } async fn op_seek_async( state: Rc<RefCell<OpState>>, args: SeekArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<u64, AnyError> { let (rid, seek_from) = seek_helper(args)?; let resource = state .borrow_mut() .resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; let pos = (*fs_file).0.as_mut().unwrap().seek(seek_from).await?; Ok(pos) } fn op_fdatasync_sync( state: &mut OpState, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { StdFileResource::with(state, rid, |r| match r { Ok(std_file) => 
std_file.sync_data().map_err(AnyError::from), Err(_) => Err(type_error("cannot sync this type of resource".to_string())), })?; Ok(()) } async fn op_fdatasync_async( state: Rc<RefCell<OpState>>, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let resource = state .borrow_mut() .resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; (*fs_file).0.as_mut().unwrap().sync_data().await?; Ok(()) } fn op_fsync_sync( state: &mut OpState, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { StdFileResource::with(state, rid, |r| match r { Ok(std_file) => std_file.sync_all().map_err(AnyError::from), Err(_) => Err(type_error("cannot sync this type of resource".to_string())), })?; Ok(()) } async fn op_fsync_async( state: Rc<RefCell<OpState>>, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let resource = state .borrow_mut() .resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; (*fs_file).0.as_mut().unwrap().sync_all().await?; Ok(()) } fn op_fstat_sync( state: &mut OpState, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<FsStat, AnyError> { super::check_unstable(state, "Deno.fstat"); let metadata = StdFileResource::with(state, rid, |r| match r { Ok(std_file) => std_file.metadata().map_err(AnyError::from), Err(_) => Err(type_error("cannot stat this type of resource".to_string())), })?; Ok(get_stat(metadata)) } async fn op_fstat_async( state: Rc<RefCell<OpState>>, rid: ResourceId, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<FsStat, AnyError> { super::check_unstable2(&state, "Deno.fstat"); let resource = state .borrow_mut() 
.resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; let metadata = (*fs_file).0.as_mut().unwrap().metadata().await?; Ok(get_stat(metadata)) } #[allow(clippy::unnecessary_wraps)] fn op_umask( state: &mut OpState, mask: Option<u32>, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<u32, AnyError> { super::check_unstable(state, "Deno.umask"); // TODO implement umask for Windows // see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc // and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019 #[cfg(not(unix))] { let _ = mask; // avoid unused warning. Err(not_supported()) } #[cfg(unix)] { use nix::sys::stat::mode_t; use nix::sys::stat::umask; use nix::sys::stat::Mode; let r = if let Some(mask) = mask { // If mask provided, return previous. umask(Mode::from_bits_truncate(mask as mode_t)) } else { // If no mask provided, we query the current. Requires two syscalls. 
let prev = umask(Mode::from_bits_truncate(0o777)); let _ = umask(prev); prev }; Ok(r.bits() as u32) } } fn op_chdir( state: &mut OpState, directory: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let d = PathBuf::from(&directory); state.borrow::<Permissions>().read.check(&d)?; set_current_dir(&d)?; Ok(()) } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct MkdirArgs { path: String, recursive: bool, mode: Option<u32>, } fn op_mkdir_sync( state: &mut OpState, args: MkdirArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); let mode = args.mode.unwrap_or(0o777) & 0o777; state.borrow::<Permissions>().write.check(&path)?; debug!("op_mkdir {} {:o} {}", path.display(), mode, args.recursive); let mut builder = std::fs::DirBuilder::new(); builder.recursive(args.recursive); #[cfg(unix)] { use std::os::unix::fs::DirBuilderExt; builder.mode(mode); } builder.create(path)?; Ok(()) } async fn op_mkdir_async( state: Rc<RefCell<OpState>>, args: MkdirArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); let mode = args.mode.unwrap_or(0o777) & 0o777; { let state = state.borrow(); state.borrow::<Permissions>().write.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_mkdir {} {:o} {}", path.display(), mode, args.recursive); let mut builder = std::fs::DirBuilder::new(); builder.recursive(args.recursive); #[cfg(unix)] { use std::os::unix::fs::DirBuilderExt; builder.mode(mode); } builder.create(path)?; Ok(()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct ChmodArgs { path: String, mode: u32, } fn op_chmod_sync( state: &mut OpState, args: ChmodArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); let mode = args.mode & 0o777; state.borrow::<Permissions>().write.check(&path)?; debug!("op_chmod_sync {} {:o}", 
path.display(), mode); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let permissions = PermissionsExt::from_mode(mode); std::fs::set_permissions(&path, permissions)?; Ok(()) } // TODO Implement chmod for Windows (#4357) #[cfg(not(unix))] { // Still check file/dir exists on Windows let _metadata = std::fs::metadata(&path)?; Err(generic_error("Not implemented")) } } async fn op_chmod_async( state: Rc<RefCell<OpState>>, args: ChmodArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); let mode = args.mode & 0o777; { let state = state.borrow(); state.borrow::<Permissions>().write.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_chmod_async {} {:o}", path.display(), mode); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let permissions = PermissionsExt::from_mode(mode); std::fs::set_permissions(&path, permissions)?; Ok(()) } // TODO Implement chmod for Windows (#4357) #[cfg(not(unix))] { // Still check file/dir exists on Windows let _metadata = std::fs::metadata(&path)?; Err(not_supported()) } }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct ChownArgs { path: String, uid: Option<u32>, gid: Option<u32>, } fn op_chown_sync( state: &mut OpState, args: ChownArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); state.borrow::<Permissions>().write.check(&path)?; debug!( "op_chown_sync {} {:?} {:?}", path.display(), args.uid, args.gid, ); #[cfg(unix)] { use nix::unistd::{chown, Gid, Uid}; let nix_uid = args.uid.map(Uid::from_raw); let nix_gid = args.gid.map(Gid::from_raw); chown(&path, nix_uid, nix_gid)?; Ok(()) } // TODO Implement chown for Windows #[cfg(not(unix))] { Err(generic_error("Not implemented")) } } async fn op_chown_async( state: Rc<RefCell<OpState>>, args: ChownArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = Path::new(&args.path).to_path_buf(); 
{ let state = state.borrow(); state.borrow::<Permissions>().write.check(&path)?; } tokio::task::spawn_blocking(move || { debug!( "op_chown_async {} {:?} {:?}", path.display(), args.uid, args.gid, ); #[cfg(unix)] { use nix::unistd::{chown, Gid, Uid}; let nix_uid = args.uid.map(Uid::from_raw); let nix_gid = args.gid.map(Gid::from_raw); chown(&path, nix_uid, nix_gid)?; Ok(()) } // TODO Implement chown for Windows #[cfg(not(unix))] Err(not_supported()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct RemoveArgs { path: String, recursive: bool, } fn op_remove_sync( state: &mut OpState, args: RemoveArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = PathBuf::from(&args.path); let recursive = args.recursive; state.borrow::<Permissions>().write.check(&path)?; #[cfg(not(unix))] use std::os::windows::prelude::MetadataExt; let metadata = std::fs::symlink_metadata(&path)?; debug!("op_remove_sync {} {}", path.display(), recursive); let file_type = metadata.file_type(); if file_type.is_file() { std::fs::remove_file(&path)?; } else if recursive { std::fs::remove_dir_all(&path)?; } else if file_type.is_symlink() { #[cfg(unix)] std::fs::remove_file(&path)?; #[cfg(not(unix))] { use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY; if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 { std::fs::remove_dir(&path)?; } else { std::fs::remove_file(&path)?; } } } else if file_type.is_dir() { std::fs::remove_dir(&path)?; } else { // pipes, sockets, etc... 
std::fs::remove_file(&path)?; } Ok(()) } async fn op_remove_async( state: Rc<RefCell<OpState>>, args: RemoveArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = PathBuf::from(&args.path); let recursive = args.recursive; { let state = state.borrow(); state.borrow::<Permissions>().write.check(&path)?; } tokio::task::spawn_blocking(move || { #[cfg(not(unix))] use std::os::windows::prelude::MetadataExt; let metadata = std::fs::symlink_metadata(&path)?; debug!("op_remove_async {} {}", path.display(), recursive); let file_type = metadata.file_type(); if file_type.is_file() { std::fs::remove_file(&path)?; } else if recursive { std::fs::remove_dir_all(&path)?; } else if file_type.is_symlink() { #[cfg(unix)] std::fs::remove_file(&path)?; #[cfg(not(unix))] { use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY; if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 { std::fs::remove_dir(&path)?; } else { std::fs::remove_file(&path)?; } } } else if file_type.is_dir() { std::fs::remove_dir(&path)?; } else { // pipes, sockets, etc... std::fs::remove_file(&path)?; } Ok(()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct CopyFileArgs { from: String, to: String, } fn op_copy_file_sync( state: &mut OpState, args: CopyFileArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let from = PathBuf::from(&args.from); let to = PathBuf::from(&args.to); let permissions = state.borrow::<Permissions>(); permissions.read.check(&from)?; permissions.write.check(&to)?; debug!("op_copy_file_sync {} {}", from.display(), to.display()); // On *nix, Rust reports non-existent `from` as ErrorKind::InvalidInput // See https://github.com/rust-lang/rust/issues/54800 // Once the issue is resolved, we should remove this workaround. 
if cfg!(unix) && !from.is_file() { return Err(custom_error("NotFound", "File not found")); } // returns size of from as u64 (we ignore) std::fs::copy(&from, &to)?; Ok(()) } async fn op_copy_file_async( state: Rc<RefCell<OpState>>, args: CopyFileArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let from = PathBuf::from(&args.from); let to = PathBuf::from(&args.to); { let state = state.borrow(); let permissions = state.borrow::<Permissions>(); permissions.read.check(&from)?; permissions.write.check(&to)?; } debug!("op_copy_file_async {} {}", from.display(), to.display()); tokio::task::spawn_blocking(move || { // On *nix, Rust reports non-existent `from` as ErrorKind::InvalidInput // See https://github.com/rust-lang/rust/issues/54800 // Once the issue is resolved, we should remove this workaround. if cfg!(unix) && !from.is_file() { return Err(custom_error("NotFound", "File not found")); } // returns size of from as u64 (we ignore) std::fs::copy(&from, &to)?; Ok(()) }) .await .unwrap() } fn to_msec(maybe_time: Result<SystemTime, io::Error>) -> Option<u64> { match maybe_time { Ok(time) => { let msec = time .duration_since(UNIX_EPOCH) .map(|t| t.as_millis() as u64) .unwrap_or_else(|err| err.duration().as_millis() as u64); Some(msec) } Err(_) => None, } } #[derive(Serialize)] #[serde(rename_all = "camelCase")] pub struct FsStat { is_file: bool, is_directory: bool, is_symlink: bool, size: u64, // In milliseconds, like JavaScript. Available on both Unix or Windows. mtime: Option<u64>, atime: Option<u64>, birthtime: Option<u64>, // Following are only valid under Unix. dev: u64, ino: u64, mode: u32, nlink: u64, uid: u32, gid: u32, rdev: u64, blksize: u64, blocks: u64, } #[inline(always)] fn get_stat(metadata: std::fs::Metadata) -> FsStat { // Unix stat member (number types only). 0 if not on unix. macro_rules! 
usm { ($member:ident) => {{ #[cfg(unix)] { metadata.$member() } #[cfg(not(unix))] { 0 } }}; } #[cfg(unix)] use std::os::unix::fs::MetadataExt; FsStat { is_file: metadata.is_file(), is_directory: metadata.is_dir(), is_symlink: metadata.file_type().is_symlink(), size: metadata.len(), // In milliseconds, like JavaScript. Available on both Unix or Windows. mtime: to_msec(metadata.modified()), atime: to_msec(metadata.accessed()), birthtime: to_msec(metadata.created()), // Following are only valid under Unix. dev: usm!(dev), ino: usm!(ino), mode: usm!(mode), nlink: usm!(nlink), uid: usm!(uid), gid: usm!(gid), rdev: usm!(rdev), blksize: usm!(blksize), blocks: usm!(blocks), } } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct StatArgs { path: String, lstat: bool, } fn op_stat_sync( state: &mut OpState, args: StatArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<FsStat, AnyError> { let path = PathBuf::from(&args.path); let lstat = args.lstat; state.borrow::<Permissions>().read.check(&path)?; debug!("op_stat_sync {} {}", path.display(), lstat); let metadata = if lstat { std::fs::symlink_metadata(&path)? } else { std::fs::metadata(&path)? }; Ok(get_stat(metadata)) } async fn op_stat_async( state: Rc<RefCell<OpState>>, args: StatArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<FsStat, AnyError> { let path = PathBuf::from(&args.path); let lstat = args.lstat; { let state = state.borrow(); state.borrow::<Permissions>().read.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_stat_async {} {}", path.display(), lstat); let metadata = if lstat { std::fs::symlink_metadata(&path)? } else { std::fs::metadata(&path)? 
}; Ok(get_stat(metadata)) }) .await .unwrap() } fn op_realpath_sync( state: &mut OpState, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let path = PathBuf::from(&path); let permissions = state.borrow::<Permissions>(); permissions.read.check(&path)?; if path.is_relative() { permissions.read.check_blind(&current_dir()?, "CWD")?; } debug!("op_realpath_sync {}", path.display()); // corresponds to the realpath on Unix and // CreateFile and GetFinalPathNameByHandle on Windows let realpath = canonicalize_path(&path)?; let realpath_str = into_string(realpath.into_os_string())?; Ok(realpath_str) } async fn op_realpath_async( state: Rc<RefCell<OpState>>, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let path = PathBuf::from(&path); { let state = state.borrow(); let permissions = state.borrow::<Permissions>(); permissions.read.check(&path)?; if path.is_relative() { permissions.read.check_blind(&current_dir()?, "CWD")?; } } tokio::task::spawn_blocking(move || { debug!("op_realpath_async {}", path.display()); // corresponds to the realpath on Unix and // CreateFile and GetFinalPathNameByHandle on Windows let realpath = canonicalize_path(&path)?; let realpath_str = into_string(realpath.into_os_string())?; Ok(realpath_str) }) .await .unwrap() } #[derive(Serialize)] #[serde(rename_all = "camelCase")] pub struct DirEntry { name: String, is_file: bool, is_directory: bool, is_symlink: bool, } fn op_read_dir_sync( state: &mut OpState, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<Vec<DirEntry>, AnyError> { let path = PathBuf::from(&path); state.borrow::<Permissions>().read.check(&path)?; debug!("op_read_dir_sync {}", path.display()); let entries: Vec<_> = std::fs::read_dir(path)? .filter_map(|entry| { let entry = entry.unwrap(); // Not all filenames can be encoded as UTF-8. Skip those for now. 
if let Ok(name) = into_string(entry.file_name()) { Some(DirEntry { name, is_file: entry .file_type() .map_or(false, |file_type| file_type.is_file()), is_directory: entry .file_type() .map_or(false, |file_type| file_type.is_dir()), is_symlink: entry .file_type() .map_or(false, |file_type| file_type.is_symlink()), }) } else { None } }) .collect(); Ok(entries) } async fn op_read_dir_async( state: Rc<RefCell<OpState>>, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<Vec<DirEntry>, AnyError> { let path = PathBuf::from(&path); { let state = state.borrow(); state.borrow::<Permissions>().read.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_read_dir_async {}", path.display()); let entries: Vec<_> = std::fs::read_dir(path)? .filter_map(|entry| { let entry = entry.unwrap(); // Not all filenames can be encoded as UTF-8. Skip those for now. if let Ok(name) = into_string(entry.file_name()) { Some(DirEntry { name, is_file: entry .file_type() .map_or(false, |file_type| file_type.is_file()), is_directory: entry .file_type() .map_or(false, |file_type| file_type.is_dir()), is_symlink: entry .file_type() .map_or(false, |file_type| file_type.is_symlink()), }) } else { None } }) .collect(); Ok(entries) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct RenameArgs { oldpath: String, newpath: String, } fn op_rename_sync( state: &mut OpState, args: RenameArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); let permissions = state.borrow::<Permissions>(); permissions.read.check(&oldpath)?; permissions.write.check(&oldpath)?; permissions.write.check(&newpath)?; debug!("op_rename_sync {} {}", oldpath.display(), newpath.display()); std::fs::rename(&oldpath, &newpath)?; Ok(()) } async fn op_rename_async( state: Rc<RefCell<OpState>>, args: RenameArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = 
PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); { let state = state.borrow(); let permissions = state.borrow::<Permissions>(); permissions.read.check(&oldpath)?; permissions.write.check(&oldpath)?; permissions.write.check(&newpath)?; } tokio::task::spawn_blocking(move || { debug!( "op_rename_async {} {}", oldpath.display(), newpath.display() ); std::fs::rename(&oldpath, &newpath)?; Ok(()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct LinkArgs { oldpath: String, newpath: String, } fn op_link_sync( state: &mut OpState, args: LinkArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); let permissions = state.borrow::<Permissions>(); permissions.read.check(&oldpath)?; permissions.write.check(&oldpath)?; permissions.read.check(&newpath)?; permissions.write.check(&newpath)?; debug!("op_link_sync {} {}", oldpath.display(), newpath.display()); std::fs::hard_link(&oldpath, &newpath)?; Ok(()) } async fn op_link_async( state: Rc<RefCell<OpState>>, args: LinkArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); { let state = state.borrow(); let permissions = state.borrow::<Permissions>(); permissions.read.check(&oldpath)?; permissions.write.check(&oldpath)?; permissions.read.check(&newpath)?; permissions.write.check(&newpath)?; } tokio::task::spawn_blocking(move || { debug!("op_link_async {} {}", oldpath.display(), newpath.display()); std::fs::hard_link(&oldpath, &newpath)?; Ok(()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct SymlinkArgs { oldpath: String, newpath: String, #[cfg(not(unix))] options: Option<SymlinkOptions>, } #[cfg(not(unix))] #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct SymlinkOptions { _type: String, } fn op_symlink_sync( 
state: &mut OpState, args: SymlinkArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); state.borrow::<Permissions>().write.check(&newpath)?; debug!( "op_symlink_sync {} {}", oldpath.display(), newpath.display() ); #[cfg(unix)] { use std::os::unix::fs::symlink; symlink(&oldpath, &newpath)?; Ok(()) } #[cfg(not(unix))] { use std::os::windows::fs::{symlink_dir, symlink_file}; match args.options { Some(options) => match options._type.as_ref() { "file" => symlink_file(&oldpath, &newpath)?, "dir" => symlink_dir(&oldpath, &newpath)?, _ => return Err(type_error("unsupported type")), }, None => { let old_meta = std::fs::metadata(&oldpath); match old_meta { Ok(metadata) => { if metadata.is_file() { symlink_file(&oldpath, &newpath)? } else if metadata.is_dir() { symlink_dir(&oldpath, &newpath)? } } Err(_) => return Err(type_error("you must pass a `options` argument for non-existent target path in windows".to_string())), } } }; Ok(()) } } async fn op_symlink_async( state: Rc<RefCell<OpState>>, args: SymlinkArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let oldpath = PathBuf::from(&args.oldpath); let newpath = PathBuf::from(&args.newpath); { let state = state.borrow(); state.borrow::<Permissions>().write.check(&newpath)?; } tokio::task::spawn_blocking(move || { debug!("op_symlink_async {} {}", oldpath.display(), newpath.display()); #[cfg(unix)] { use std::os::unix::fs::symlink; symlink(&oldpath, &newpath)?; Ok(()) } #[cfg(not(unix))] { use std::os::windows::fs::{symlink_dir, symlink_file}; match args.options { Some(options) => match options._type.as_ref() { "file" => symlink_file(&oldpath, &newpath)?, "dir" => symlink_dir(&oldpath, &newpath)?, _ => return Err(type_error("unsupported type")), }, None => { let old_meta = std::fs::metadata(&oldpath); match old_meta { Ok(metadata) => { if metadata.is_file() { symlink_file(&oldpath, &newpath)? 
} else if metadata.is_dir() { symlink_dir(&oldpath, &newpath)? } } Err(_) => return Err(type_error("you must pass a `options` argument for non-existent target path in windows".to_string())), } } }; Ok(()) } }) .await .unwrap() } fn op_read_link_sync( state: &mut OpState, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let path = PathBuf::from(&path); state.borrow::<Permissions>().read.check(&path)?; debug!("op_read_link_value {}", path.display()); let target = std::fs::read_link(&path)?.into_os_string(); let targetstr = into_string(target)?; Ok(targetstr) } async fn op_read_link_async( state: Rc<RefCell<OpState>>, path: String, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let path = PathBuf::from(&path); { let state = state.borrow(); state.borrow::<Permissions>().read.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_read_link_async {}", path.display()); let target = std::fs::read_link(&path)?.into_os_string(); let targetstr = into_string(target)?; Ok(targetstr) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct FtruncateArgs { rid: ResourceId, len: i32, } fn op_ftruncate_sync( state: &mut OpState, args: FtruncateArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable(state, "Deno.ftruncate"); let rid = args.rid; let len = args.len as u64; StdFileResource::with(state, rid, |r| match r { Ok(std_file) => std_file.set_len(len).map_err(AnyError::from), Err(_) => Err(type_error("cannot truncate this type of resource")), })?; Ok(()) } async fn op_ftruncate_async( state: Rc<RefCell<OpState>>, args: FtruncateArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable2(&state, "Deno.ftruncate"); let rid = args.rid; let len = args.len as u64; let resource = state .borrow_mut() .resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return 
Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; (*fs_file).0.as_mut().unwrap().set_len(len).await?; Ok(()) } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct TruncateArgs { path: String, len: u64, } fn op_truncate_sync( state: &mut OpState, args: TruncateArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = PathBuf::from(&args.path); let len = args.len; state.borrow::<Permissions>().write.check(&path)?; debug!("op_truncate_sync {} {}", path.display(), len); let f = std::fs::OpenOptions::new().write(true).open(&path)?; f.set_len(len)?; Ok(()) } async fn op_truncate_async( state: Rc<RefCell<OpState>>, args: TruncateArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { let path = PathBuf::from(&args.path); let len = args.len; { let state = state.borrow(); state.borrow::<Permissions>().write.check(&path)?; } tokio::task::spawn_blocking(move || { debug!("op_truncate_async {} {}", path.display(), len); let f = std::fs::OpenOptions::new().write(true).open(&path)?; f.set_len(len)?; Ok(()) }) .await .unwrap() } fn make_temp( dir: Option<&Path>, prefix: Option<&str>, suffix: Option<&str>, is_dir: bool, ) -> std::io::Result<PathBuf> { let prefix_ = prefix.unwrap_or(""); let suffix_ = suffix.unwrap_or(""); let mut buf: PathBuf = match dir { Some(ref p) => p.to_path_buf(), None => temp_dir(), } .join("_"); let mut rng = thread_rng(); loop { let unique = rng.gen::<u32>(); buf.set_file_name(format!("{}{:08x}{}", prefix_, unique, suffix_)); let r = if is_dir { #[allow(unused_mut)] let mut builder = std::fs::DirBuilder::new(); #[cfg(unix)] { use std::os::unix::fs::DirBuilderExt; builder.mode(0o700); } builder.create(buf.as_path()) } else { let mut open_options = std::fs::OpenOptions::new(); open_options.write(true).create_new(true); #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; open_options.mode(0o600); } open_options.open(buf.as_path())?; Ok(()) 
}; match r { Err(ref e) if e.kind() == std::io::ErrorKind::AlreadyExists => continue, Ok(_) => return Ok(buf), Err(e) => return Err(e), } } } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct MakeTempArgs { dir: Option<String>, prefix: Option<String>, suffix: Option<String>, } fn op_make_temp_dir_sync( state: &mut OpState, args: MakeTempArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let dir = args.dir.map(|s| PathBuf::from(&s)); let prefix = args.prefix.map(String::from); let suffix = args.suffix.map(String::from); state .borrow::<Permissions>() .write .check(dir.clone().unwrap_or_else(temp_dir).as_path())?; // TODO(piscisaureus): use byte vector for paths, not a string. // See https://github.com/denoland/deno/issues/627. // We can't assume that paths are always valid utf8 strings. let path = make_temp( // Converting Option<String> to Option<&str> dir.as_deref(), prefix.as_deref(), suffix.as_deref(), true, )?; let path_str = into_string(path.into_os_string())?; Ok(path_str) } async fn op_make_temp_dir_async( state: Rc<RefCell<OpState>>, args: MakeTempArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let dir = args.dir.map(|s| PathBuf::from(&s)); let prefix = args.prefix.map(String::from); let suffix = args.suffix.map(String::from); { let state = state.borrow(); state .borrow::<Permissions>() .write .check(dir.clone().unwrap_or_else(temp_dir).as_path())?; } tokio::task::spawn_blocking(move || { // TODO(piscisaureus): use byte vector for paths, not a string. // See https://github.com/denoland/deno/issues/627. // We can't assume that paths are always valid utf8 strings. 
let path = make_temp( // Converting Option<String> to Option<&str> dir.as_deref(), prefix.as_deref(), suffix.as_deref(), true, )?; let path_str = into_string(path.into_os_string())?; Ok(path_str) }) .await .unwrap() } fn op_make_temp_file_sync( state: &mut OpState, args: MakeTempArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let dir = args.dir.map(|s| PathBuf::from(&s)); let prefix = args.prefix.map(String::from); let suffix = args.suffix.map(String::from); state .borrow::<Permissions>() .write .check(dir.clone().unwrap_or_else(temp_dir).as_path())?; // TODO(piscisaureus): use byte vector for paths, not a string. // See https://github.com/denoland/deno/issues/627. // We can't assume that paths are always valid utf8 strings. let path = make_temp( // Converting Option<String> to Option<&str> dir.as_deref(), prefix.as_deref(), suffix.as_deref(), false, )?; let path_str = into_string(path.into_os_string())?; Ok(path_str) } async fn op_make_temp_file_async( state: Rc<RefCell<OpState>>, args: MakeTempArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let dir = args.dir.map(|s| PathBuf::from(&s)); let prefix = args.prefix.map(String::from); let suffix = args.suffix.map(String::from); { let state = state.borrow(); state .borrow::<Permissions>() .write .check(dir.clone().unwrap_or_else(temp_dir).as_path())?; } tokio::task::spawn_blocking(move || { // TODO(piscisaureus): use byte vector for paths, not a string. // See https://github.com/denoland/deno/issues/627. // We can't assume that paths are always valid utf8 strings. 
let path = make_temp( // Converting Option<String> to Option<&str> dir.as_deref(), prefix.as_deref(), suffix.as_deref(), false, )?; let path_str = into_string(path.into_os_string())?; Ok(path_str) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct FutimeArgs { rid: ResourceId, atime: (i64, u32), mtime: (i64, u32), } fn op_futime_sync( state: &mut OpState, args: FutimeArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable(state, "Deno.futimeSync"); let rid = args.rid; let atime = filetime::FileTime::from_unix_time(args.atime.0, args.atime.1); let mtime = filetime::FileTime::from_unix_time(args.mtime.0, args.mtime.1); StdFileResource::with(state, rid, |r| match r { Ok(std_file) => { filetime::set_file_handle_times(std_file, Some(atime), Some(mtime)) .map_err(AnyError::from) } Err(_) => Err(type_error( "cannot futime on this type of resource".to_string(), )), })?; Ok(()) } async fn op_futime_async( state: Rc<RefCell<OpState>>, args: FutimeArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable2(&state, "Deno.futime"); let rid = args.rid; let atime = filetime::FileTime::from_unix_time(args.atime.0, args.atime.1); let mtime = filetime::FileTime::from_unix_time(args.mtime.0, args.mtime.1); let resource = state .borrow_mut() .resource_table .get::<StdFileResource>(rid) .ok_or_else(bad_resource_id)?; if resource.fs_file.is_none() { return Err(bad_resource_id()); } let mut fs_file = RcRef::map(&resource, |r| r.fs_file.as_ref().unwrap()) .borrow_mut() .await; let std_file = (*fs_file) .0 .as_mut() .unwrap() .try_clone() .await? 
.into_std() .await; tokio::task::spawn_blocking(move || { filetime::set_file_handle_times(&std_file, Some(atime), Some(mtime))?; Ok(()) }) .await .unwrap() } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct UtimeArgs { path: String, atime: (i64, u32), mtime: (i64, u32), } fn op_utime_sync( state: &mut OpState, args: UtimeArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable(state, "Deno.utime"); let path = PathBuf::from(&args.path); let atime = filetime::FileTime::from_unix_time(args.atime.0, args.atime.1); let mtime = filetime::FileTime::from_unix_time(args.mtime.0, args.mtime.1); state.borrow::<Permissions>().write.check(&path)?; filetime::set_file_times(path, atime, mtime)?; Ok(()) } async fn op_utime_async( state: Rc<RefCell<OpState>>, args: UtimeArgs, _zero_copy: Option<ZeroCopyBuf>, ) -> Result<(), AnyError> { super::check_unstable(&state.borrow(), "Deno.utime"); let path = PathBuf::from(&args.path); let atime = filetime::FileTime::from_unix_time(args.atime.0, args.atime.1); let mtime = filetime::FileTime::from_unix_time(args.mtime.0, args.mtime.1); state.borrow().borrow::<Permissions>().write.check(&path)?; tokio::task::spawn_blocking(move || { filetime::set_file_times(path, atime, mtime)?; Ok(()) }) .await .unwrap() } fn op_cwd( state: &mut OpState, _args: (), _zero_copy: Option<ZeroCopyBuf>, ) -> Result<String, AnyError> { let path = current_dir()?; state .borrow::<Permissions>() .read .check_blind(&path, "CWD")?; let path_str = into_string(path.into_os_string())?; Ok(path_str) }
26.268551
135
0.642005
e61199ce29f241acc944fcc41355bd82d6ef3b5b
719
use std::collections::HashMap;

use crate::engine::schedule::ScheduleOptions;
use crate::engine::state::State;

/// Behavioral contract for a simulation agent driven by the engine's schedule.
///
/// The schedule calls [`Agent::step`] once per tick and consults
/// [`Agent::should_remove`] / [`Agent::should_reproduce`] to mutate the set
/// of scheduled agents without giving agents direct access to the schedule.
pub trait Agent {
    /// The shared simulation state type this agent reads during a step.
    /// Must be `Sync + Send` — presumably so the scheduler can step agents
    /// from multiple threads; confirm against the engine's scheduler.
    type SimState: State + Sync + Send;

    /// Advances this agent by one simulation step, reading from `state`.
    fn step(&mut self, state: &Self::SimState);

    /// Specifies whether this agent should be removed from the schedule after the current step.
    ///
    /// Defaults to `false`: agents stay scheduled unless they opt out.
    fn should_remove(&mut self, _state: &Self::SimState) -> bool {
        false
    }

    /// Allows the agent to schedule new agents without having direct access to the Schedule.
    /// This should NOT return an agent that is already scheduled.
    ///
    /// Returns a map from each newly spawned agent to the options the
    /// schedule should register it with; `None` (the default) spawns nothing.
    fn should_reproduce(
        &mut self,
        _state: &Self::SimState,
    ) -> Option<HashMap<Box<Self>, ScheduleOptions>> {
        None
    }
}
28.76
96
0.663421
8a6f23c83790eb5270ad41b39a7d2e049f1fb044
4,033
#![recursion_limit = "128"]

use std::time::Duration;
use yew::services::{ConsoleService, IntervalService, Task, TimeoutService};
use yew::{html, Callback, Component, ComponentLink, Html, ShouldRender};

/// Example yew component demonstrating the timer services:
/// a one-shot 3s timeout, a repeating 1s interval, and console logging.
pub struct Model {
    link: ComponentLink<Self>,
    timeout: TimeoutService,
    interval: IntervalService,
    console: ConsoleService,
    // Pre-built callbacks reused each time a timer is (re)started.
    callback_tick: Callback<()>, // routes interval firings to Msg::Tick
    callback_done: Callback<()>, // routes timeout completion to Msg::Done
    // Handle of the currently running timeout/interval; dropping or
    // cancelling it stops the timer. None when nothing is running.
    job: Option<Box<dyn Task>>,
    // Log lines rendered in view(); cleared when a new timer starts.
    messages: Vec<&'static str>,
    // Keeps the standalone 10s interval alive for the component's lifetime;
    // never read, only held so the task isn't dropped (and thus cancelled).
    _standalone: Box<dyn Task>,
}

pub enum Msg {
    StartTimeout,
    StartInterval,
    Cancel,
    Done,
    Tick,
}

impl Component for Model {
    type Message = Msg;
    type Properties = ();

    fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
        // This callback doesn't send any message to a scope
        let callback = |_| {
            println!("Example of a standalone callback.");
        };
        let mut interval = IntervalService::new();
        // Standalone interval: fires every 10s for as long as `handle` lives.
        let handle = interval.spawn(Duration::from_secs(10), callback.into());

        Model {
            link: link.clone(),
            timeout: TimeoutService::new(),
            interval,
            console: ConsoleService::new(),
            callback_tick: link.callback(|_| Msg::Tick),
            callback_done: link.callback(|_| Msg::Done),
            job: None,
            messages: Vec::new(),
            _standalone: Box::new(handle),
        }
    }

    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        match msg {
            Msg::StartTimeout => {
                // Inner scope: store the new task handle, replacing (and
                // thereby cancelling) any previously running job.
                {
                    let handle = self
                        .timeout
                        .spawn(Duration::from_secs(3), self.callback_done.clone());
                    self.job = Some(Box::new(handle));
                }
                self.messages.clear();
                self.console.clear();
                self.messages.push("Timer started!");
                // Starts a named console timer; matched by time_named_end in Done.
                self.console.time_named("Timer");
            }
            Msg::StartInterval => {
                {
                    let handle = self
                        .interval
                        .spawn(Duration::from_secs(1), self.callback_tick.clone());
                    self.job = Some(Box::new(handle));
                }
                self.messages.clear();
                self.console.clear();
                self.messages.push("Interval started!");
                self.console.log("Interval started!");
            }
            Msg::Cancel => {
                // take() both clears the slot and gives us the task to cancel.
                if let Some(mut task) = self.job.take() {
                    task.cancel();
                }
                self.messages.push("Canceled!");
                self.console.warn("Canceled!");
                self.console.assert(self.job.is_none(), "Job still exists!");
            }
            Msg::Done => {
                self.messages.push("Done!");
                self.console.group();
                self.console.info("Done!");
                self.console.time_named_end("Timer");
                self.console.group_end();
                // The one-shot timeout has fired; drop its handle.
                self.job = None;
            }
            Msg::Tick => {
                self.messages.push("Tick...");
                self.console.count_named("Tick");
            }
        }
        // Always re-render: every message mutates `messages` or `job`.
        true
    }

    fn view(&self) -> Html {
        let view_message = |message| {
            html! { <p>{ message }</p> }
        };
        // Start buttons are disabled while a job runs; Cancel only enabled then.
        let has_job = self.job.is_some();
        html! {
            <div>
                <button disabled=has_job onclick=self.link.callback(|_| Msg::StartTimeout)>{ "Start Timeout" }</button>
                <button disabled=has_job onclick=self.link.callback(|_| Msg::StartInterval)>{ "Start Interval" }</button>
                <button disabled=!has_job onclick=self.link.callback(|_| Msg::Cancel)>{ "Cancel!" }</button>
                <div>
                    { for self.messages.iter().map(view_message) }
                </div>
            </div>
        }
    }
}
33.057377
104
0.485743
398cccb06c63b23ea9cea97eb5c58494853a134c
2,308
use crate::raw::{Env, Local}; use std::mem::MaybeUninit; use std::os::raw::c_void; use std::slice; use crate::napi::bindings as napi; pub unsafe fn new(env: Env, len: usize) -> Result<Local, napi::Status> { let (buf, bytes) = uninitialized(env, len)?; std::ptr::write_bytes(bytes, 0, len); Ok(buf) } pub unsafe fn uninitialized(env: Env, len: usize) -> Result<(Local, *mut u8), napi::Status> { let mut buf = MaybeUninit::uninit(); let mut bytes = MaybeUninit::uninit(); let status = napi::create_buffer(env, len, bytes.as_mut_ptr(), buf.as_mut_ptr()); if status == napi::Status::PendingException { return Err(status); } assert_eq!(status, napi::Status::Ok); Ok((buf.assume_init(), bytes.assume_init().cast())) } pub unsafe fn new_external<T>(env: Env, data: T) -> Local where T: AsMut<[u8]> + Send, { // Safety: Boxing could move the data; must box before grabbing a raw pointer let mut data = Box::new(data); let buf = data.as_mut().as_mut(); let length = buf.len(); let mut result = MaybeUninit::uninit(); assert_eq!( napi::create_external_buffer( env, length, buf.as_mut_ptr() as *mut _, Some(drop_external::<T>), Box::into_raw(data) as *mut _, result.as_mut_ptr(), ), napi::Status::Ok, ); result.assume_init() } pub unsafe fn data(env: Env, base_out: &mut *mut c_void, obj: Local) -> usize { let mut size = 0; assert_eq!( napi::get_buffer_info(env, obj, base_out as *mut _, &mut size as *mut _), napi::Status::Ok, ); size } unsafe extern "C" fn drop_external<T>(_env: Env, _data: *mut c_void, hint: *mut c_void) { Box::<T>::from_raw(hint as *mut _); } /// # Safety /// * Caller must ensure `env` and `buf` are valid /// * The lifetime `'a` does not exceed the lifetime of `Env` or `buf` pub unsafe fn as_mut_slice<'a>(env: Env, buf: Local) -> &'a mut [u8] { let mut data = MaybeUninit::uninit(); let mut size = 0usize; assert_eq!( napi::get_buffer_info(env, buf, data.as_mut_ptr(), &mut size as *mut _), napi::Status::Ok, ); if size == 0 { return &mut []; } 
slice::from_raw_parts_mut(data.assume_init().cast(), size) }
26.837209
93
0.59662
bbc8d0485cd04ae6b904ae014659b5106249936a
3,682
/// Problem 8 /// The four adjacent digits in the 1000-digit number that have the greatest /// product are 9 × 9 × 8 × 9 = 5832. /// /// 73167176531330624919225119674426574742355349194934 /// 96983520312774506326239578318016984801869478851843 /// 85861560789112949495459501737958331952853208805511 /// 12540698747158523863050715693290963295227443043557 /// 66896648950445244523161731856403098711121722383113 /// 62229893423380308135336276614282806444486645238749 /// 30358907296290491560440772390713810515859307960866 /// 70172427121883998797908792274921901699720888093776 /// 65727333001053367881220235421809751254540594752243 /// 52584907711670556013604839586446706324415722155397 /// 53697817977846174064955149290862569321978468622482 /// 83972241375657056057490261407972968652414535100474 /// 82166370484403199890008895243450658541227588666881 /// 16427171479924442928230863465674813919123162824586 /// 17866458359124566529476545682848912883142607690042 /// 24219022671055626321111109370544217506941658960408 /// 07198403850962455444362981230987879927244284909188 /// 84580156166097919133875499200524063689912560717606 /// 05886116467109405077541002256983155200055935729725 /// 71636269561882670428252483600823257530420752963450 /// /// Find the thirteen adjacent digits in the 1000-digit number that have the /// greatest product. What is the value of this product? fn main() { // How many adjacent digits to check? 
let adj: usize = 13; let num: String = "73167176531330624919225119674426574742355349194934\ 96983520312774506326239578318016984801869478851843\ 85861560789112949495459501737958331952853208805511\ 12540698747158523863050715693290963295227443043557\ 66896648950445244523161731856403098711121722383113\ 62229893423380308135336276614282806444486645238749\ 30358907296290491560440772390713810515859307960866\ 70172427121883998797908792274921901699720888093776\ 65727333001053367881220235421809751254540594752243\ 52584907711670556013604839586446706324415722155397\ 53697817977846174064955149290862569321978468622482\ 83972241375657056057490261407972968652414535100474\ 82166370484403199890008895243450658541227588666881\ 16427171479924442928230863465674813919123162824586\ 17866458359124566529476545682848912883142607690042\ 24219022671055626321111109370544217506941658960408\ 07198403850962455444362981230987879927244284909188\ 84580156166097919133875499200524063689912560717606\ 05886116467109405077541002256983155200055935729725\ 71636269561882670428252483600823257530420752963450".to_string(); let digits: Vec<u8> = num.chars() .map(|c| c.to_digit(10).unwrap() as u8) .collect(); let mut window: Vec<u8> = Vec::new(); let mut max: u64 = 0; let mut max_window: Vec<u8> = vec![0u8; adj]; for d in digits { if window.len() < adj { window.push(d); } else { let mut mul: u64 = 1; for v in &window { mul *= *v as u64; } if mul > max { max = mul; max_window.clone_from_slice(&window); } window.push(d); window = window.split_off(1); } } print!("Answer: {} [digits: ", max); for d in max_window { print!("{}", d); } print!("]\n"); }
43.833333
84
0.700163
1d0b9829242d50f959b76679056673192a834d73
14,115
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Types/fns concerning Internet Protocol (IP), versions 4 & 6 #[allow(missing_doc)]; use core::prelude::*; use core::libc; use core::comm::{stream, SharedChan}; use core::ptr; use core::result; use core::str; use iotask = uv::iotask::IoTask; use interact = uv::iotask::interact; use sockaddr_in = super::uv_ll::sockaddr_in; use sockaddr_in6 = super::uv_ll::sockaddr_in6; use addrinfo = super::uv_ll::addrinfo; use uv_getaddrinfo_t = super::uv_ll::uv_getaddrinfo_t; use uv_ip4_name = super::uv_ll::ip4_name; use uv_ip4_port = super::uv_ll::ip4_port; use uv_ip6_name = super::uv_ll::ip6_name; use uv_ip6_port = super::uv_ll::ip6_port; use uv_getaddrinfo = super::uv_ll::getaddrinfo; use uv_freeaddrinfo = super::uv_ll::freeaddrinfo; use create_uv_getaddrinfo_t = super::uv_ll::getaddrinfo_t; use set_data_for_req = super::uv_ll::set_data_for_req; use get_data_for_req = super::uv_ll::get_data_for_req; use ll = super::uv_ll; /// An IP address pub enum IpAddr { /// An IPv4 address Ipv4(sockaddr_in), Ipv6(sockaddr_in6) } /// Human-friendly feedback on why a parse_addr attempt failed pub struct ParseAddrErr { err_msg: ~str, } /** * Convert a `IpAddr` to a str * * # Arguments * * * ip - a `std::net::ip::IpAddr` */ pub fn format_addr(ip: &IpAddr) -> ~str { match *ip { Ipv4(ref addr) => unsafe { let result = uv_ip4_name(addr); if result == ~"" { fail!("failed to convert inner sockaddr_in address to str") } result }, Ipv6(ref addr) => unsafe { let result = uv_ip6_name(addr); if result == ~"" { fail!("failed to convert inner 
sockaddr_in address to str") } result } } } /** * Get the associated port * * # Arguments * * ip - a `std::net::ip::IpAddr` */ pub fn get_port(ip: &IpAddr) -> uint { match *ip { Ipv4(ref addr) => unsafe { uv_ip4_port(addr) }, Ipv6(ref addr) => unsafe { uv_ip6_port(addr) } } } /// Represents errors returned from `net::ip::get_addr()` enum IpGetAddrErr { GetAddrUnknownError } /** * Attempts name resolution on the provided `node` string * * # Arguments * * * `node` - a string representing some host address * * `iotask` - a `uv::iotask` used to interact with the underlying event loop * * # Returns * * A `result<~[ip_addr], ip_get_addr_err>` instance that will contain * a vector of `ip_addr` results, in the case of success, or an error * object in the case of failure */ pub fn get_addr(node: &str, iotask: &iotask) -> result::Result<~[IpAddr], IpGetAddrErr> { let (output_po, output_ch) = stream(); let mut output_ch = Some(SharedChan::new(output_ch)); do str::as_buf(node) |node_ptr, len| { let output_ch = output_ch.swap_unwrap(); debug!("slice len %?", len); let handle = create_uv_getaddrinfo_t(); let handle_ptr: *uv_getaddrinfo_t = &handle; let handle_data = GetAddrData { output_ch: output_ch.clone() }; let handle_data_ptr: *GetAddrData = &handle_data; do interact(iotask) |loop_ptr| { unsafe { let result = uv_getaddrinfo( loop_ptr, handle_ptr, get_addr_cb, node_ptr, ptr::null(), ptr::null()); match result { 0i32 => { set_data_for_req(handle_ptr, handle_data_ptr); } _ => { output_ch.send(result::Err(GetAddrUnknownError)); } } } }; output_po.recv() } } pub mod v4 { use core::prelude::*; use net::ip::{IpAddr, Ipv4, ParseAddrErr}; use uv::ll; use uv_ip4_addr = uv::ll::ip4_addr; use uv_ip4_name = uv::ll::ip4_name; use core::cast::transmute; use core::result; use core::uint; /** * Convert a str to `ip_addr` * * # Failure * * Fails if the string is not a valid IPv4 address * * # Arguments * * * ip - a string of the format `x.x.x.x` * * # Returns * * * an `ip_addr` of the `ipv4` 
variant */ pub fn parse_addr(ip: &str) -> IpAddr { match try_parse_addr(ip) { result::Ok(addr) => addr, result::Err(ref err_data) => fail!(copy err_data.err_msg) } } // the simple, old style numberic representation of // ipv4 pub struct Ipv4Rep { a: u8, b: u8, c: u8, d: u8 } pub trait AsUnsafeU32 { unsafe fn as_u32(&self) -> u32; } impl AsUnsafeU32 for Ipv4Rep { // this is pretty dastardly, i know unsafe fn as_u32(&self) -> u32 { let this: &mut u32 = transmute(self); *this } } pub fn parse_to_ipv4_rep(ip: &str) -> result::Result<Ipv4Rep, ~str> { let parts: ~[uint] = ip.split_iter('.').transform(|s| { match uint::from_str(s) { Some(n) if n <= 255 => n, _ => 256 } }).collect(); if parts.len() != 4 { Err(fmt!("'%s' doesn't have 4 parts", ip)) } else if parts.iter().any_(|x| *x == 256u) { Err(fmt!("invalid octal in addr '%s'", ip)) } else { Ok(Ipv4Rep { a: parts[0] as u8, b: parts[1] as u8, c: parts[2] as u8, d: parts[3] as u8, }) } } pub fn try_parse_addr(ip: &str) -> result::Result<IpAddr,ParseAddrErr> { unsafe { let INADDR_NONE = ll::get_INADDR_NONE(); let ip_rep_result = parse_to_ipv4_rep(ip); if result::is_err(&ip_rep_result) { let err_str = result::get_err(&ip_rep_result); return result::Err(ParseAddrErr { err_msg: err_str }) } // ipv4_rep.as_u32 is unsafe :/ let input_is_inaddr_none = result::get(&ip_rep_result).as_u32() == INADDR_NONE; let new_addr = uv_ip4_addr(ip, 22); let reformatted_name = uv_ip4_name(&new_addr); debug!("try_parse_addr: input ip: %s reparsed ip: %s", ip, reformatted_name); let ref_ip_rep_result = parse_to_ipv4_rep(reformatted_name); if result::is_err(&ref_ip_rep_result) { let err_str = result::get_err(&ref_ip_rep_result); return Err(ParseAddrErr { err_msg: err_str }) } if result::get(&ref_ip_rep_result).as_u32() == INADDR_NONE && !input_is_inaddr_none { Err(ParseAddrErr { err_msg: ~"uv_ip4_name produced invalid result.", }) } else { Ok(Ipv4(copy(new_addr))) } } } } pub mod v6 { use core::prelude::*; use net::ip::{IpAddr, Ipv6, 
ParseAddrErr}; use uv_ip6_addr = uv::ll::ip6_addr; use uv_ip6_name = uv::ll::ip6_name; use core::result; /** * Convert a str to `ip_addr` * * # Failure * * Fails if the string is not a valid IPv6 address * * # Arguments * * * ip - an ipv6 string. See RFC2460 for spec. * * # Returns * * * an `ip_addr` of the `ipv6` variant */ pub fn parse_addr(ip: &str) -> IpAddr { match try_parse_addr(ip) { result::Ok(addr) => addr, result::Err(err_data) => fail!(copy err_data.err_msg) } } pub fn try_parse_addr(ip: &str) -> result::Result<IpAddr,ParseAddrErr> { unsafe { // need to figure out how to establish a parse failure.. let new_addr = uv_ip6_addr(ip, 22); let reparsed_name = uv_ip6_name(&new_addr); debug!("v6::try_parse_addr ip: '%s' reparsed '%s'", ip, reparsed_name); // '::' appears to be uv_ip6_name() returns for bogus // parses.. if ip != &"::" && reparsed_name == ~"::" { Err(ParseAddrErr { err_msg:fmt!("failed to parse '%s'", ip) }) } else { Ok(Ipv6(new_addr)) } } } } struct GetAddrData { output_ch: SharedChan<result::Result<~[IpAddr],IpGetAddrErr>> } extern fn get_addr_cb(handle: *uv_getaddrinfo_t, status: libc::c_int, res: *addrinfo) { unsafe { debug!("in get_addr_cb"); let handle_data = get_data_for_req(handle) as *GetAddrData; let output_ch = (*handle_data).output_ch.clone(); if status == 0i32 { if res != (ptr::null::<addrinfo>()) { let mut out_vec = ~[]; debug!("initial addrinfo: %?", res); let mut curr_addr = res; loop { let new_ip_addr = if ll::is_ipv4_addrinfo(curr_addr) { Ipv4(copy(( *ll::addrinfo_as_sockaddr_in(curr_addr)))) } else if ll::is_ipv6_addrinfo(curr_addr) { Ipv6(copy(( *ll::addrinfo_as_sockaddr_in6(curr_addr)))) } else { debug!("curr_addr is not of family AF_INET or \ AF_INET6. Error."); output_ch.send( result::Err(GetAddrUnknownError)); break; }; out_vec.push(new_ip_addr); let next_addr = ll::get_next_addrinfo(curr_addr); if next_addr == ptr::null::<addrinfo>() as *addrinfo { debug!("null next_addr encountered. 
no mas"); break; } else { curr_addr = next_addr; debug!("next_addr addrinfo: %?", curr_addr); } } debug!("successful process addrinfo result, len: %?", out_vec.len()); output_ch.send(result::Ok(out_vec)); } else { debug!("addrinfo pointer is NULL"); output_ch.send( result::Err(GetAddrUnknownError)); } } else { debug!("status != 0 error in get_addr_cb"); output_ch.send( result::Err(GetAddrUnknownError)); } if res != (ptr::null::<addrinfo>()) { uv_freeaddrinfo(res); } debug!("leaving get_addr_cb"); } } #[cfg(test)] mod test { use net_ip::*; use net_ip::v4; use net_ip::v6; use uv; use core::result; #[test] fn test_ip_ipv4_parse_and_format_ip() { let localhost_str = ~"127.0.0.1"; assert!(format_addr(&v4::parse_addr(localhost_str)) == localhost_str) } #[test] fn test_ip_ipv6_parse_and_format_ip() { let localhost_str = ~"::1"; let format_result = format_addr(&v6::parse_addr(localhost_str)); debug!("results: expected: '%s' actual: '%s'", localhost_str, format_result); assert_eq!(format_result, localhost_str); } #[test] fn test_ip_ipv4_bad_parse() { match v4::try_parse_addr("b4df00d") { result::Err(ref err_info) => { debug!("got error as expected %?", err_info); assert!(true); } result::Ok(ref addr) => { fail!("Expected failure, but got addr %?", addr); } } } #[test] #[ignore(target_os="win32")] fn test_ip_ipv6_bad_parse() { match v6::try_parse_addr("::,~2234k;") { result::Err(ref err_info) => { debug!("got error as expected %?", err_info); assert!(true); } result::Ok(ref addr) => { fail!("Expected failure, but got addr %?", addr); } } } #[test] #[ignore(reason = "valgrind says it's leaky")] fn test_ip_get_addr() { let localhost_name = ~"localhost"; let iotask = &uv::global_loop::get(); let ga_result = get_addr(localhost_name, iotask); if result::is_err(&ga_result) { fail!("got err result from net::ip::get_addr();") } // note really sure how to reliably test/assert // this.. mostly just wanting to see it work, atm. 
let results = result::unwrap(ga_result); debug!("test_get_addr: Number of results for %s: %?", localhost_name, results.len()); for results.iter().advance |r| { let ipv_prefix = match *r { Ipv4(_) => ~"IPv4", Ipv6(_) => ~"IPv6" }; debug!("test_get_addr: result %s: '%s'", ipv_prefix, format_addr(r)); } // at least one result.. this is going to vary from system // to system, based on stuff like the contents of /etc/hosts assert!(!results.is_empty()); } #[test] #[ignore(reason = "valgrind says it's leaky")] fn test_ip_get_addr_bad_input() { let localhost_name = ~"sjkl234m,./sdf"; let iotask = &uv::global_loop::get(); let ga_result = get_addr(localhost_name, iotask); assert!(result::is_err(&ga_result)); } }
31.366667
78
0.52469
fce0b87557f367cb7b7eb645c0f4f2e463c903e5
796
use cmake; fn main() { if cfg!(feature = "cuda") { let dst = cmake::Config::new("cuda_mining").build(); println!("cargo:rustc-link-search=native={}", dst.display()); println!("cargo:rustc-link-lib=static=cuda_mining"); println!("cargo:rustc-link-lib=dylib=stdc++"); // link to stdc++ lib let lib_path = env!("LD_LIBRARY_PATH"); let cuda_lib_path: Vec<_> = lib_path.split(':').into_iter().filter(|path| path.contains("cuda")).collect(); if cuda_lib_path.is_empty() { panic!("Ensure cuda installed on your environment"); } else { println!("cargo:rustc-link-search=native={}", cuda_lib_path[0]); println!("cargo:rustc-link-lib=cudart"); // cuda run-time lib } } }
37.904762
115
0.576633
0abcee71ac72910a8815eac65a2a0f4bdb23a5ad
12,363
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn serialize_operation_crate_operation_activate_anomaly_detector( input: &crate::input::ActivateAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_activate_anomaly_detector_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_back_test_anomaly_detector( input: &crate::input::BackTestAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_back_test_anomaly_detector_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_create_alert( input: &crate::input::CreateAlertInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_create_alert_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_create_anomaly_detector( input: &crate::input::CreateAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_create_anomaly_detector_input( &mut object, input, )?; object.finish(); 
Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_create_metric_set( input: &crate::input::CreateMetricSetInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_create_metric_set_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_delete_alert( input: &crate::input::DeleteAlertInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_delete_alert_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_delete_anomaly_detector( input: &crate::input::DeleteAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_delete_anomaly_detector_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_describe_alert( input: &crate::input::DescribeAlertInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_describe_alert_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_describe_anomaly_detection_executions( 
input: &crate::input::DescribeAnomalyDetectionExecutionsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_describe_anomaly_detection_executions_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_describe_anomaly_detector( input: &crate::input::DescribeAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_describe_anomaly_detector_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_describe_metric_set( input: &crate::input::DescribeMetricSetInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_describe_metric_set_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_get_anomaly_group( input: &crate::input::GetAnomalyGroupInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_get_anomaly_group_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_get_feedback( input: &crate::input::GetFeedbackInput, ) -> 
Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_get_feedback_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_get_sample_data( input: &crate::input::GetSampleDataInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_get_sample_data_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_alerts( input: &crate::input::ListAlertsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_alerts_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_anomaly_detectors( input: &crate::input::ListAnomalyDetectorsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_anomaly_detectors_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_anomaly_group_related_metrics( input: &crate::input::ListAnomalyGroupRelatedMetricsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = 
String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_anomaly_group_related_metrics_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_anomaly_group_summaries( input: &crate::input::ListAnomalyGroupSummariesInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_anomaly_group_summaries_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_anomaly_group_time_series( input: &crate::input::ListAnomalyGroupTimeSeriesInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_anomaly_group_time_series_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_list_metric_sets( input: &crate::input::ListMetricSetsInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_list_metric_sets_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_put_feedback( input: &crate::input::PutFeedbackInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = 
aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_put_feedback_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_tag_resource( input: &crate::input::TagResourceInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_tag_resource_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_update_anomaly_detector( input: &crate::input::UpdateAnomalyDetectorInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_update_anomaly_detector_input( &mut object, input, )?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) } pub fn serialize_operation_crate_operation_update_metric_set( input: &crate::input::UpdateMetricSetInput, ) -> Result<aws_smithy_http::body::SdkBody, aws_smithy_http::operation::SerializationError> { let mut out = String::new(); let mut object = aws_smithy_json::serialize::JsonObjectWriter::new(&mut out); crate::json_ser::serialize_structure_crate_input_update_metric_set_input(&mut object, input)?; object.finish(); Ok(aws_smithy_http::body::SdkBody::from(out)) }
45.120438
100
0.751031
ac15eac646f8743d61c7b4cd907dedf3cff470fa
1,361
//! A collection of structures and functions useful across the entire amethyst project. #![warn( missing_debug_implementations, missing_docs, rust_2018_idioms, rust_2018_compatibility )] #![warn(clippy::all)] #![allow(clippy::new_without_default)] #[cfg(all(target_os = "emscripten", not(no_threading)))] compile_error!("the cfg flag \"no_threading\" is required when building for emscripten"); #[macro_use] extern crate getset; #[macro_use] extern crate derive_new; pub use alga; pub use approx; pub use nalgebra as math; pub use num_traits as num; pub use specs as ecs; pub use specs::{shred, shrev}; use rayon; use std::sync::Arc; pub use crate::{ bundle::SystemBundle, event::EventReader, system_ext::{Pausable, SystemExt}, timing::*, transform::*, }; pub use self::{ axis::{Axis2, Axis3}, hidden::{Hidden, HiddenPropagate}, hide_system::HideHierarchySystem, named::{Named, WithNamed}, system_desc::{RunNowDesc, SystemDesc}, }; pub mod bundle; pub mod deferred_dispatcher_operation; pub mod frame_limiter; pub mod geometry; pub mod timing; pub mod transform; mod axis; mod event; mod hidden; mod hide_system; mod named; mod system_desc; mod system_ext; /// A rayon thread pool wrapped in an `Arc`. This should be used as resource in `World`. pub type ArcThreadPool = Arc<rayon::ThreadPool>;
21.603175
89
0.722998
bbdeda1f246c2ef143ce0654ea4d51e87690bc3d
2,240
use crate::requests::*; use crate::types::*; /// Use this method to receive incoming updates using long polling. #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] #[must_use = "requests do nothing unless sent"] pub struct GetUpdates { #[serde(skip_serializing_if = "Option::is_none")] offset: Option<Integer>, #[serde(skip_serializing_if = "Option::is_none")] limit: Option<Integer>, // TODO(knsd): Values between 1—100 are accepted #[serde(skip_serializing_if = "Option::is_none")] timeout: Option<Integer>, // TODO(knsd): Should be positive allowed_updates: Vec<AllowedUpdate>, // TODO(knsd) BitSet? HashSet? BTreeSet? } impl Request for GetUpdates { type Type = JsonRequestType<Self>; type Response = JsonIdResponse<Vec<Update>>; fn serialize(&self) -> Result<HttpRequest, Error> { Self::Type::serialize(RequestUrl::method("getUpdates"), self) } } impl GetUpdates { pub fn new() -> Self { GetUpdates { offset: None, limit: None, timeout: None, allowed_updates: Vec::new(), } } pub fn offset(&mut self, offset: Integer) -> &mut Self { self.offset = Some(offset); self } pub fn limit(&mut self, limit: Integer) -> &mut Self { self.limit = Some(limit); self } pub fn timeout(&mut self, timeout: Integer) -> &mut Self { self.timeout = Some(timeout); self } pub fn allowed_updates(&mut self, updates: &[AllowedUpdate]) -> &mut Self { self.allowed_updates = updates.to_vec(); self } } #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] pub enum AllowedUpdate { #[serde(rename = "message")] Message, #[serde(rename = "edited_message")] EditedMessage, #[serde(rename = "channel_post")] ChannelPost, #[serde(rename = "edited_channel_post")] EditedChannelPost, #[serde(rename = "inline_query")] InlineQuery, #[serde(rename = "chosen_inline_query")] ChosenInlineResult, #[serde(rename = "callback_query")] CallbackQuery, #[serde(rename = "shipping_query")] ShippingQuery, #[serde(rename = "pre_checkout_query")] PreCheckoutQuery, }
28.717949
81
0.630357
28b7f40610ec4f6a4c05487ef9aff092a1dfa2d5
219
#![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #[cfg(not(feature = "sdl-graphics"))] include!("./base.rs"); #[cfg(feature = "sdl-graphics")] include!("./sdl-graphics.rs");
21.9
37
0.680365
2173c350838d9e26c7a7af21f2cd05e7ad1bd072
5,486
use core::fmt::{Debug, Formatter, Result}; use core::mem::size_of; use core::sync::atomic::{AtomicIsize, Ordering}; use crate::arch::vmm::{Vcpu, VcpuAccessGuestState}; use crate::arch::{HostPageTable, LinuxContext}; use crate::cell::Cell; use crate::consts::{HV_STACK_SIZE, LOCAL_PER_CPU_BASE}; use crate::error::HvResult; use crate::ffi::PER_CPU_ARRAY_PTR; use crate::header::HvHeader; use crate::memory::{addr::virt_to_phys, GenericPageTable, MemFlags, MemoryRegion, MemorySet}; pub const PER_CPU_SIZE: usize = size_of::<PerCpu>(); static ACTIVATED_CPUS: AtomicIsize = AtomicIsize::new(0); #[derive(Debug, Eq, PartialEq)] pub enum CpuState { HvDisabled, HvEnabled, } #[repr(align(4096))] pub struct PerCpu { pub cpu_id: usize, pub state: CpuState, pub vcpu: Vcpu, stack: [usize; HV_STACK_SIZE / size_of::<usize>()], linux: LinuxContext, hvm: MemorySet<HostPageTable>, } impl PerCpu { pub fn from_id<'a>(cpu_id: usize) -> &'a Self { unsafe { &core::slice::from_raw_parts(PER_CPU_ARRAY_PTR, HvHeader::get().max_cpus as usize) [cpu_id] } } pub fn from_id_mut<'a>(cpu_id: usize) -> &'a mut Self { unsafe { &mut core::slice::from_raw_parts_mut( PER_CPU_ARRAY_PTR, HvHeader::get().max_cpus as usize, )[cpu_id] } } pub fn from_local_base<'a>() -> &'a Self { unsafe { &*(LOCAL_PER_CPU_BASE as *const Self) } } pub fn from_local_base_mut<'a>() -> &'a mut Self { unsafe { &mut *(LOCAL_PER_CPU_BASE as *mut Self) } } pub fn stack_top(&self) -> usize { self.stack.as_ptr_range().end as _ } pub fn activated_cpus() -> usize { ACTIVATED_CPUS.load(Ordering::Acquire) as _ } pub fn init(&mut self, cpu_id: usize, linux_sp: usize, cell: &Cell) -> HvResult { info!("CPU {} init...", cpu_id); self.cpu_id = cpu_id; self.state = CpuState::HvDisabled; self.linux = LinuxContext::load_from(linux_sp); let mut hvm = cell.hvm.read().clone(); let vaddr = self as *const _ as usize; let paddr = virt_to_phys(vaddr); // Temporary mapping, will remove in Self::activate_vmm() hvm.insert(MemoryRegion::new_with_offset_mapper( 
vaddr, paddr, PER_CPU_SIZE, MemFlags::READ | MemFlags::WRITE, ))?; hvm.insert(MemoryRegion::new_with_offset_mapper( LOCAL_PER_CPU_BASE, paddr, PER_CPU_SIZE, MemFlags::READ | MemFlags::WRITE, ))?; trace!("PerCpu host virtual memory set: {:#x?}", hvm); unsafe { // avoid dropping, same below core::ptr::write(&mut self.hvm, hvm); self.hvm.activate(); core::ptr::write(&mut self.vcpu, Vcpu::new(&self.linux, cell)?); } self.state = CpuState::HvEnabled; Ok(()) } #[inline(never)] fn activate_vmm_local(&mut self) -> HvResult { self.vcpu.activate_vmm(&self.linux)?; unreachable!() } #[inline(never)] fn deactivate_vmm_common(&mut self) -> HvResult { self.vcpu.exit(&mut self.linux)?; self.linux.restore(); self.state = CpuState::HvDisabled; self.vcpu.deactivate_vmm(&self.linux)?; unreachable!() } pub fn activate_vmm(&mut self) -> HvResult { println!("Activating hypervisor on CPU {}...", self.cpu_id); ACTIVATED_CPUS.fetch_add(1, Ordering::SeqCst); let local_cpu_data = Self::from_local_base_mut(); let old_percpu_vaddr = self as *const _ as usize; // Switch stack to the private mapping. unsafe { asm!("add rsp, {}", in(reg) LOCAL_PER_CPU_BASE - old_percpu_vaddr) }; local_cpu_data.hvm.delete(old_percpu_vaddr)?; local_cpu_data.hvm.page_table().flush(None); local_cpu_data.activate_vmm_local() } pub fn deactivate_vmm(&mut self, ret_code: usize) -> HvResult { println!("Deactivating hypervisor on CPU {}...", self.cpu_id); ACTIVATED_CPUS.fetch_add(-1, Ordering::SeqCst); self.vcpu.set_return_val(ret_code); // Restore full per_cpu region access so that we can switch // back to the common stack mapping and to Linux page tables. 
let common_cpu_data = Self::from_id_mut(self.cpu_id); let common_percpu_vaddr = common_cpu_data as *const _ as usize; let paddr = virt_to_phys(common_percpu_vaddr); self.hvm.insert(MemoryRegion::new_with_offset_mapper( common_percpu_vaddr, paddr, PER_CPU_SIZE, MemFlags::READ | MemFlags::WRITE, ))?; self.hvm.page_table().flush(None); unsafe { asm!("add rsp, {}", in(reg) common_percpu_vaddr - LOCAL_PER_CPU_BASE) }; common_cpu_data.deactivate_vmm_common() } pub fn fault(&mut self) -> HvResult { warn!("VCPU fault: {:#x?}", self); self.vcpu.inject_fault()?; Ok(()) } } impl Debug for PerCpu { fn fmt(&self, f: &mut Formatter) -> Result { let mut res = f.debug_struct("PerCpu"); res.field("cpu_id", &self.cpu_id) .field("state", &self.state); if self.state != CpuState::HvDisabled { res.field("vcpu", &self.vcpu); } else { res.field("linux", &self.linux); } res.finish() } }
31.710983
94
0.597339
797ded6fa73efc73c70ca32aaa767ac9ef9c00db
9,086
// Copyright 2019 The CryptoCorrosion Contributors // Copyright 2020 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The ChaCha random number generator. use ppv_lite86::{dispatch, dispatch_light128}; pub use ppv_lite86::Machine; use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4, Vec4Ext, Vector}; pub(crate) const BLOCK: usize = 16; pub(crate) const BLOCK64: u64 = BLOCK as u64; const LOG2_BUFBLOCKS: u64 = 2; const BUFBLOCKS: u64 = 1 << LOG2_BUFBLOCKS; pub(crate) const BUFSZ64: u64 = BLOCK64 * BUFBLOCKS; pub(crate) const BUFSZ: usize = BUFSZ64 as usize; const STREAM_PARAM_NONCE: u32 = 1; const STREAM_PARAM_BLOCK: u32 = 0; #[derive(Clone, PartialEq, Eq)] pub struct ChaCha { pub(crate) b: vec128_storage, pub(crate) c: vec128_storage, pub(crate) d: vec128_storage, } #[derive(Clone)] pub struct State<V> { pub(crate) a: V, pub(crate) b: V, pub(crate) c: V, pub(crate) d: V, } #[inline(always)] pub(crate) fn round<V: ArithOps + BitOps32>(mut x: State<V>) -> State<V> { x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right16(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right20(); x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right24(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right25(); x } #[inline(always)] pub(crate) fn diagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words3012(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words1230(); x } #[inline(always)] pub(crate) fn undiagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words1230(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words3012(); x } impl ChaCha { #[inline(always)] pub fn new(key: 
&[u8; 32], nonce: &[u8]) -> Self { init_chacha(key, nonce) } /// Produce 4 blocks of output, advancing the state #[inline(always)] pub fn refill4(&mut self, drounds: u32, out: &mut [u32; BUFSZ]) { refill_wide(self, drounds, out) } #[inline(always)] pub fn set_block_pos(&mut self, value: u64) { set_stream_param(self, STREAM_PARAM_BLOCK, value) } #[inline(always)] pub fn get_block_pos(&self) -> u64 { get_stream_param(self, STREAM_PARAM_BLOCK) } #[inline(always)] pub fn set_nonce(&mut self, value: u64) { set_stream_param(self, STREAM_PARAM_NONCE, value) } #[inline(always)] pub fn get_nonce(&self) -> u64 { get_stream_param(self, STREAM_PARAM_NONCE) } #[inline(always)] pub fn get_seed(&self) -> [u8; 32] { get_seed(self) } } // This implementation is platform-independent. #[inline(always)] #[cfg(target_endian = "big")] fn add_pos<Mach: Machine>(_m: Mach, d0: Mach::u32x4, i: u64) -> Mach::u32x4 { let pos0 = ((d0.extract(1) as u64) << 32) | d0.extract(0) as u64; let pos = pos0.wrapping_add(i); d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0) } #[inline(always)] #[cfg(target_endian = "big")] fn d0123<Mach: Machine>(m: Mach, d: vec128_storage) -> Mach::u32x4x4 { let d0: Mach::u32x4 = m.unpack(d); let mut pos = ((d0.extract(1) as u64) << 32) | d0.extract(0) as u64; pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); Mach::u32x4x4::from_lanes([d0, d1, d2, d3]) } // Pos is packed into the state vectors as a little-endian u64, // so on LE platforms we can use native vector ops to increment it. 
#[inline(always)] #[cfg(target_endian = "little")] fn add_pos<Mach: Machine>(m: Mach, d: Mach::u32x4, i: u64) -> Mach::u32x4 { let d0: Mach::u64x2 = m.unpack(d.into()); let incr = m.vec([i, 0]); m.unpack((d0 + incr).into()) } #[inline(always)] #[cfg(target_endian = "little")] fn d0123<Mach: Machine>(m: Mach, d: vec128_storage) -> Mach::u32x4x4 { let d0: Mach::u64x2 = m.unpack(d); let incr = Mach::u64x2x4::from_lanes([m.vec([0, 0]), m.vec([1, 0]), m.vec([2, 0]), m.vec([3, 0])]); m.unpack((Mach::u64x2x4::from_lanes([d0, d0, d0, d0]) + incr).into()) } #[allow(clippy::many_single_char_names)] #[inline(always)] fn refill_wide_impl<Mach: Machine>( m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u32; BUFSZ], ) { let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let b = m.unpack(state.b); let c = m.unpack(state.c); let mut x = State { a: Mach::u32x4x4::from_lanes([k, k, k, k]), b: Mach::u32x4x4::from_lanes([b, b, b, b]), c: Mach::u32x4x4::from_lanes([c, c, c, c]), d: d0123(m, state.d), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } let kk = Mach::u32x4x4::from_lanes([k, k, k, k]); let sb = m.unpack(state.b); let sb = Mach::u32x4x4::from_lanes([sb, sb, sb, sb]); let sc = m.unpack(state.c); let sc = Mach::u32x4x4::from_lanes([sc, sc, sc, sc]); let sd = d0123(m, state.d); let results = Mach::u32x4x4::transpose4(x.a + kk, x.b + sb, x.c + sc, x.d + sd); out[0..16].copy_from_slice(&results.0.to_scalars()); out[16..32].copy_from_slice(&results.1.to_scalars()); out[32..48].copy_from_slice(&results.2.to_scalars()); out[48..64].copy_from_slice(&results.3.to_scalars()); state.d = add_pos(m, sd.to_lanes()[0], 4).into(); } dispatch!(m, Mach, { fn refill_wide(state: &mut ChaCha, drounds: u32, out: &mut [u32; BUFSZ]) { refill_wide_impl(m, state, drounds, out); } }); // Single-block, rounds-only; shared by try_apply_keystream for tails shorter than BUFSZ // and XChaCha's setup step. 
dispatch!(m, Mach, { fn refill_narrow_rounds(state: &mut ChaCha, drounds: u32) -> State<vec128_storage> { let k: Mach::u32x4 = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut x = State { a: k, b: m.unpack(state.b), c: m.unpack(state.c), d: m.unpack(state.d), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } State { a: x.a.into(), b: x.b.into(), c: x.c.into(), d: x.d.into(), } } }); dispatch_light128!(m, Mach, { fn set_stream_param(state: &mut ChaCha, param: u32, value: u64) { let d: Mach::u32x4 = m.unpack(state.d); state.d = d .insert((value >> 32) as u32, (param << 1) | 1) .insert(value as u32, param << 1) .into(); } }); dispatch_light128!(m, Mach, { fn get_stream_param(state: &ChaCha, param: u32) -> u64 { let d: Mach::u32x4 = m.unpack(state.d); ((d.extract((param << 1) | 1) as u64) << 32) | d.extract(param << 1) as u64 } }); dispatch_light128!(m, Mach, { fn get_seed(state: &ChaCha) -> [u8; 32] { let b: Mach::u32x4 = m.unpack(state.b); let c: Mach::u32x4 = m.unpack(state.c); let mut key = [0u8; 32]; b.write_le(&mut key[..16]); c.write_le(&mut key[16..]); key } }); fn read_u32le(xs: &[u8]) -> u32 { assert_eq!(xs.len(), 4); u32::from(xs[0]) | (u32::from(xs[1]) << 8) | (u32::from(xs[2]) << 16) | (u32::from(xs[3]) << 24) } dispatch_light128!(m, Mach, { fn init_chacha(key: &[u8; 32], nonce: &[u8]) -> ChaCha { let ctr_nonce = [ 0, if nonce.len() == 12 { read_u32le(&nonce[0..4]) } else { 0 }, read_u32le(&nonce[nonce.len() - 8..nonce.len() - 4]), read_u32le(&nonce[nonce.len() - 4..]), ]; let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); ChaCha { b: key0.into(), c: key1.into(), d: ctr_nonce.into(), } } }); dispatch_light128!(m, Mach, { fn init_chacha_x(key: &[u8; 32], nonce: &[u8; 24], rounds: u32) -> ChaCha { let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); let nonce0: Mach::u32x4 = m.read_le(&nonce[..16]); let mut state = ChaCha { 
b: key0.into(), c: key1.into(), d: nonce0.into(), }; let x = refill_narrow_rounds(&mut state, rounds); let ctr_nonce1 = [0, 0, read_u32le(&nonce[16..20]), read_u32le(&nonce[20..24])]; state.b = x.a; state.c = x.d; state.d = ctr_nonce1.into(); state } });
32.219858
111
0.577262
9cb9433e7beaabc8fe2f4eb9ad61455bdf680dc6
10,185
//! This module implements a reference search. //! First, the element at the cursor position must be either an `ast::Name` //! or `ast::NameRef`. If it's a `ast::NameRef`, at the classification step we //! try to resolve the direct tree parent of this element, otherwise we //! already have a definition and just need to get its HIR together with //! some information that is needed for futher steps of searching. //! After that, we collect files that might contain references and look //! for text occurrences of the identifier. If there's an `ast::NameRef` //! at the index that the match starts at and its tree parent is //! resolved to the search element definition, we get a reference. mod classify; mod name_definition; mod rename; mod search_scope; use once_cell::unsync::Lazy; use ra_db::{SourceDatabase, SourceDatabaseExt}; use ra_prof::profile; use ra_syntax::{algo::find_node_at_offset, ast, AstNode, SourceFile, SyntaxNode, TextUnit}; use crate::{ db::RootDatabase, display::ToNav, FilePosition, FileRange, NavigationTarget, RangeInfo, }; pub(crate) use self::{ classify::{classify_name, classify_name_ref}, name_definition::{NameDefinition, NameKind}, rename::rename, }; pub use self::search_scope::SearchScope; #[derive(Debug, Clone)] pub struct ReferenceSearchResult { declaration: NavigationTarget, references: Vec<FileRange>, } impl ReferenceSearchResult { pub fn declaration(&self) -> &NavigationTarget { &self.declaration } pub fn references(&self) -> &[FileRange] { &self.references } /// Total number of references /// At least 1 since all valid references should /// Have a declaration pub fn len(&self) -> usize { self.references.len() + 1 } } // allow turning ReferenceSearchResult into an iterator // over FileRanges impl IntoIterator for ReferenceSearchResult { type Item = FileRange; type IntoIter = std::vec::IntoIter<FileRange>; fn into_iter(mut self) -> Self::IntoIter { let mut v = Vec::with_capacity(self.len()); v.push(FileRange { file_id: 
self.declaration.file_id(), range: self.declaration.range() }); v.append(&mut self.references); v.into_iter() } } pub(crate) fn find_all_refs( db: &RootDatabase, position: FilePosition, search_scope: Option<SearchScope>, ) -> Option<RangeInfo<ReferenceSearchResult>> { let parse = db.parse(position.file_id); let syntax = parse.tree().syntax().clone(); let RangeInfo { range, info: (name, def) } = find_name(db, &syntax, position)?; let declaration = match def.kind { NameKind::Macro(mac) => mac.to_nav(db), NameKind::Field(field) => field.to_nav(db), NameKind::AssocItem(assoc) => assoc.to_nav(db), NameKind::Def(def) => NavigationTarget::from_def(db, def)?, NameKind::SelfType(ref ty) => match ty.as_adt() { Some((adt, _)) => adt.to_nav(db), None => return None, }, NameKind::Local(local) => local.to_nav(db), NameKind::GenericParam(_) => return None, }; let search_scope = { let base = def.search_scope(db); match search_scope { None => base, Some(scope) => base.intersection(&scope), } }; let references = process_definition(db, def, name, search_scope); Some(RangeInfo::new(range, ReferenceSearchResult { declaration, references })) } fn find_name<'a>( db: &RootDatabase, syntax: &SyntaxNode, position: FilePosition, ) -> Option<RangeInfo<(String, NameDefinition)>> { if let Some(name) = find_node_at_offset::<ast::Name>(&syntax, position.offset) { let def = classify_name(db, position.file_id, &name)?; let range = name.syntax().text_range(); return Some(RangeInfo::new(range, (name.text().to_string(), def))); } let name_ref = find_node_at_offset::<ast::NameRef>(&syntax, position.offset)?; let def = classify_name_ref(db, position.file_id, &name_ref)?; let range = name_ref.syntax().text_range(); Some(RangeInfo::new(range, (name_ref.text().to_string(), def))) } fn process_definition( db: &RootDatabase, def: NameDefinition, name: String, scope: SearchScope, ) -> Vec<FileRange> { let _p = profile("process_definition"); let pat = name.as_str(); let mut refs = vec![]; for (file_id, 
search_range) in scope { let text = db.file_text(file_id); let parse = Lazy::new(|| SourceFile::parse(&text)); for (idx, _) in text.match_indices(pat) { let offset = TextUnit::from_usize(idx); if let Some(name_ref) = find_node_at_offset::<ast::NameRef>(parse.tree().syntax(), offset) { let range = name_ref.syntax().text_range(); if let Some(search_range) = search_range { if !range.is_subrange(&search_range) { continue; } } if let Some(d) = classify_name_ref(db, file_id, &name_ref) { if d == def { refs.push(FileRange { file_id, range }); } } } } } refs } #[cfg(test)] mod tests { use crate::{ mock_analysis::{analysis_and_position, single_file_with_position, MockAnalysis}, ReferenceSearchResult, SearchScope, }; #[test] fn test_find_all_refs_for_local() { let code = r#" fn main() { let mut i = 1; let j = 1; i = i<|> + j; { i = 0; } i = 5; }"#; let refs = get_all_refs(code); assert_eq!(refs.len(), 5); } #[test] fn test_find_all_refs_for_param_inside() { let code = r#" fn foo(i : u32) -> u32 { i<|> }"#; let refs = get_all_refs(code); assert_eq!(refs.len(), 2); } #[test] fn test_find_all_refs_for_fn_param() { let code = r#" fn foo(i<|> : u32) -> u32 { i }"#; let refs = get_all_refs(code); assert_eq!(refs.len(), 2); } #[test] fn test_find_all_refs_field_name() { let code = r#" //- /lib.rs struct Foo { pub spam<|>: u32, } fn main(s: Foo) { let f = s.spam; } "#; let refs = get_all_refs(code); assert_eq!(refs.len(), 2); } #[test] fn test_find_all_refs_impl_item_name() { let code = r#" //- /lib.rs struct Foo; impl Foo { fn f<|>(&self) { } } "#; let refs = get_all_refs(code); assert_eq!(refs.len(), 1); } #[test] fn test_find_all_refs_enum_var_name() { let code = r#" //- /lib.rs enum Foo { A, B<|>, C, } "#; let refs = get_all_refs(code); assert_eq!(refs.len(), 1); } #[test] fn test_find_all_refs_two_modules() { let code = r#" //- /lib.rs pub mod foo; pub mod bar; fn f() { let i = foo::Foo { n: 5 }; } //- /foo.rs use crate::bar; pub struct Foo { pub n: u32, } fn f() { let i = 
bar::Bar { n: 5 }; } //- /bar.rs use crate::foo; pub struct Bar { pub n: u32, } fn f() { let i = foo::Foo<|> { n: 5 }; } "#; let (analysis, pos) = analysis_and_position(code); let refs = analysis.find_all_refs(pos, None).unwrap().unwrap(); assert_eq!(refs.len(), 3); } // `mod foo;` is not in the results because `foo` is an `ast::Name`. // So, there are two references: the first one is a definition of the `foo` module, // which is the whole `foo.rs`, and the second one is in `use foo::Foo`. #[test] fn test_find_all_refs_decl_module() { let code = r#" //- /lib.rs mod foo<|>; use foo::Foo; fn f() { let i = Foo { n: 5 }; } //- /foo.rs pub struct Foo { pub n: u32, } "#; let (analysis, pos) = analysis_and_position(code); let refs = analysis.find_all_refs(pos, None).unwrap().unwrap(); assert_eq!(refs.len(), 2); } #[test] fn test_find_all_refs_super_mod_vis() { let code = r#" //- /lib.rs mod foo; //- /foo.rs mod some; use some::Foo; fn f() { let i = Foo { n: 5 }; } //- /foo/some.rs pub(super) struct Foo<|> { pub n: u32, } "#; let (analysis, pos) = analysis_and_position(code); let refs = analysis.find_all_refs(pos, None).unwrap().unwrap(); assert_eq!(refs.len(), 3); } #[test] fn test_find_all_refs_with_scope() { let code = r#" //- /lib.rs mod foo; mod bar; pub fn quux<|>() {} //- /foo.rs fn f() { super::quux(); } //- /bar.rs fn f() { super::quux(); } "#; let (mock, pos) = MockAnalysis::with_files_and_position(code); let bar = mock.id_of("/bar.rs"); let analysis = mock.analysis(); let refs = analysis.find_all_refs(pos, None).unwrap().unwrap(); assert_eq!(refs.len(), 3); let refs = analysis.find_all_refs(pos, Some(SearchScope::single_file(bar))).unwrap().unwrap(); assert_eq!(refs.len(), 2); } fn get_all_refs(text: &str) -> ReferenceSearchResult { let (analysis, position) = single_file_with_position(text); analysis.find_all_refs(position, None).unwrap().unwrap() } }
27.015915
99
0.533922
6763074d88269a84d06a732b23db3f9974fb4877
2,689
//! json-rpc error enum which contains all different errors which can happen //! when sending request and processing reply from json-rpc server. use std::{convert::From, fmt, io}; use tonic::{Code, Status}; #[derive(Debug, PartialEq)] pub enum RpcCode { ParseError, InvalidRequest, MethodNotFound, InvalidParams, InternalError, NotFound, AlreadyExists, } #[derive(Debug)] pub enum Error { InvalidVersion, InvalidReplyId, IoError(io::Error), ParseError(serde_json::Error), ConnectError { sock: String, err: io::Error }, RpcError { code: RpcCode, msg: String }, GenericError(String), } impl From<RpcCode> for Code { fn from(code: RpcCode) -> Code { match code { RpcCode::InvalidParams => Code::InvalidArgument, RpcCode::NotFound => Code::NotFound, RpcCode::AlreadyExists => Code::AlreadyExists, _ => Code::Internal, } } } impl From<Error> for Status { fn from(error: Error) -> Status { match error { Error::RpcError { code, msg, } => Status::new(code.into(), msg), _ => Status::new(Code::Internal, error.to_string()), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::InvalidVersion => write!(f, "Invalid json-rpc version"), Error::InvalidReplyId => write!(f, "Invalid ID of json-rpc reply"), Error::ConnectError { sock, err, } => write!(f, "Error connecting to {}: {}", sock, err), Error::IoError(err) => write!(f, "IO error: {}", err), Error::ParseError(err) => write!(f, "Invalid json reply: {}", err), Error::RpcError { code, msg, } => write!(f, "Json-rpc error {:?}: {}", code, msg), Error::GenericError(msg) => write!(f, "{}", msg), } } } // Automatic conversion functions for simply using .into() on various return // types follow impl std::error::Error for Error { fn cause(&self) -> Option<&dyn std::error::Error> { None } } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::IoError(err) } } impl From<serde_json::Error> for Error { fn from(err: serde_json::Error) -> Self { Error::ParseError(err) } } impl 
From<&str> for Error { fn from(err: &str) -> Self { Error::GenericError(err.to_owned()) } } impl From<String> for Error { fn from(err: String) -> Self { Error::GenericError(err) } }
25.855769
79
0.555225
7a5d524663d8a42144238d5c5255fee76239d78b
2,374
use std::hash::{Hash, BuildHasher}; use std::collections::HashSet; /// Interesection of an arbitrary number of sets. /// /// The order do matter, since it will iterate over the first set and checking for the element to /// be contained in the following sets. Thus, it is recommended, that you place the most "narrow" /// sets in the start, to gain maximal performance. #[macro_export] macro_rules! intersection { ($a:expr, $b:expr) => { $a.intersection(&$b) }; ($a:expr, $b:expr, $( $x:expr ),*) => { { $a.iter().filter(|x| $b.contains(x) $( && $x.contains(x))* ) } } } /// Initialize a set with elements. #[macro_export] macro_rules! hashset { [$( $elem:expr ),*] => { { use std::collections::HashSet; let mut hs = HashSet::with_hasher(Default::default()); $( hs.insert($elem); )* hs } }; } /// Extension to the set API. pub trait SetExt<T, S> { // TODO find a better name /// Create a new set. fn new_default() -> Self; } impl<T, S> SetExt<T, S> for HashSet<T, S> where T: Eq + Hash, S: BuildHasher + Default { fn new_default() -> HashSet<T, S> { HashSet::with_hasher(Default::default()) } } #[cfg(test)] mod test { use super::*; use std::collections::HashSet; use std::hash::BuildHasherDefault; use hash::Djb2; #[test] fn test_intersection() { let mut set1 = HashSet::new(); let mut set2 = HashSet::new(); let mut set3 = HashSet::new(); set1.insert(4); set1.insert(5); set2.insert(4); set2.insert(7); set3.insert(4); set3.insert(16); let mut int = intersection!(set1, set2, set3); assert_eq!(int.next(), Some(&4)); assert!(int.next().is_none()); } #[test] fn test_initializer() { let set: HashSet<_, BuildHasherDefault<Djb2>> = hashset!(2, 3, 5, 7, 11, 13); assert!(set.contains(&2)); assert!(set.contains(&3)); assert!(set.contains(&5)); assert!(set.contains(&7)); assert!(set.contains(&11)); assert!(set.contains(&13)); assert!(set.len() == 6); } #[test] fn test_new_default() { let _: HashSet<u64, BuildHasherDefault<Djb2>> = HashSet::new_default(); } }
24.989474
97
0.546757
1111139978b903d9a6c6d459158d822cbd31371b
5,893
use std::io::Write; use crate::parse::{Serialize, Deserialize}; use bin_ser::{Serialize, Deserialize}; #[derive(Clone, Debug)] pub struct Pflags { pub read: bool, pub write: bool, pub append: bool, pub creat: bool, pub trunc: bool, pub excl: bool, } #[derive(Clone, Debug)] pub struct Attrsflags { pub size: bool, pub uidgid: bool, pub permissions: bool, pub acmodtime: bool, pub extended: bool, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ExtendedAttr { pub r#type: String, pub data: String, } #[derive(Clone, Debug, Default)] pub struct Attrs { pub size: Option<u64>, pub uid_gid: Option<(u32, u32)>, pub permissions: Option<u32>, pub atime_mtime: Option<(u32, u32)>, pub extended_attrs: Vec<ExtendedAttr>, } #[derive(Copy, Clone, Debug, Serialize, Deserialize)] #[bin_ser(repr = u32)] pub enum StatusCode { #[bin_ser(val = 0)] r#Ok, #[bin_ser(val = 1)] Eof, #[bin_ser(val = 2)] NoSuchFile, #[bin_ser(val = 3)] PermissionDenied, #[bin_ser(val = 4)] Failure, #[bin_ser(val = 5)] BadMessage, #[bin_ser(val = 6)] NoConnection, #[bin_ser(val = 7)] ConnectionLost, #[bin_ser(val = 8)] OpUnsupported, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Extension { pub name: String, pub data: String, } #[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct Name { pub filename: String, pub longname: String, pub attrs: Attrs, } #[derive(Clone, Debug)] pub enum ExtendedRequestType { OpensshStatvfs, OpensshPosixRename, OpensshHardlink, OpensshFsync, } #[derive(Clone, Debug, Serialize, Deserialize)] #[bin_ser(repr = ExtendedRequestType)] pub enum ExtendedRequest { #[bin_ser(val = ExtendedRequestType::OpensshStatvfs)] OpensshStatvfs { path: String, }, #[bin_ser(val = ExtendedRequestType::OpensshPosixRename)] OpensshPosixRename { oldpath: String, newpath: String, }, #[bin_ser(val = ExtendedRequestType::OpensshHardlink)] OpensshHardlink { oldpath: String, newpath: String, }, #[bin_ser(val = ExtendedRequestType::OpensshFsync)] OpensshFsync { handle: String, 
}, } pub type Handle = String; #[derive(Clone, Debug, Serialize, Deserialize)] #[bin_ser(repr = u8)] pub enum SftpClientPacket { #[bin_ser(val = 1)] Init { version: u32, extensions: VecEos<Extension>, }, #[bin_ser(val = 3)] Open { id: u32, filename: String, pflags: Pflags, attrs: Attrs, }, #[bin_ser(val = 4)] Close { id: u32, handle: Handle, }, #[bin_ser(val = 5)] Read { id: u32, handle: Handle, offset: u64, len: u32, }, #[bin_ser(val = 6)] Write { id: u32, handle: Handle, offset: u64, data: VecU8, }, #[bin_ser(val = 7)] Lstat { id: u32, path: String, }, #[bin_ser(val = 8)] Fstat { id: u32, handle: Handle, }, #[bin_ser(val = 9)] Setstat { id: u32, path: String, attrs: Attrs, }, #[bin_ser(val = 10)] Fsetstat { id: u32, handle: Handle, attrs: Attrs, }, #[bin_ser(val = 11)] Opendir { id: u32, path: String, }, #[bin_ser(val = 12)] Readdir { id: u32, handle: Handle, }, #[bin_ser(val = 13)] Remove { id: u32, filename: String, }, #[bin_ser(val = 14)] Mkdir { id: u32, path: String, attrs: Attrs, }, #[bin_ser(val = 15)] Rmdir { id: u32, path: String, }, #[bin_ser(val = 16)] Realpath { id: u32, path: String, }, #[bin_ser(val = 17)] Stat { id: u32, path: String, }, #[bin_ser(val = 18)] Rename { id: u32, oldpath: String, newpath: String, }, #[bin_ser(val = 19)] Readlink { id: u32, path: String, }, #[bin_ser(val = 20)] Symlink { id: u32, linkpath: String, targetpath: String, }, #[bin_ser(val = 200)] Extended { id: u32, extended_request: ExtendedRequest, }, } #[derive(Clone, Debug, Serialize, Deserialize)] #[bin_ser(repr = u8)] pub enum SftpServerPacket { #[bin_ser(val = 2)] Version { version: u32, extensions: VecEos<Extension>, }, #[bin_ser(val = 101)] Status { id: u32, status_code: StatusCode, error_message: String, language_tag: String, }, #[bin_ser(val = 102)] Handle { id: u32, handle: Handle, }, #[bin_ser(val = 103)] Data { id: u32, data: VecU8, }, #[bin_ser(val = 104)] Name { id: u32, names: Vec<Name>, }, #[bin_ser(val = 105)] Attrs { id: u32, attrs: Attrs, }, 
#[bin_ser(val = 201)] ExtendedReply { id: u32, data: VecU8, }, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct FsStats { pub f_bsize: u64, pub f_frsize: u64, pub f_blocks: u64, pub f_bfree: u64, pub f_bavail: u64, pub f_files: u64, pub f_ffree: u64, pub f_favail: u64, pub f_fsid: u64, pub f_flag: u64, pub f_namemax: u64, } /// Vec that has no length on-wire. It ends when the stream ends. #[derive(Clone, Debug)] pub struct VecEos<T>(pub Vec<T>); impl<T> From<Vec<T>> for VecEos<T> { fn from(vec: Vec<T>) -> Self { Self(vec) } } #[derive(Clone, Debug)] pub struct VecU8(pub Vec<u8>); impl From<Vec<u8>> for VecU8 { fn from(vec: Vec<u8>) -> Self { Self(vec) } }
19.578073
65
0.540472
21e43be20456b654697fc376f997a0243867e536
50,195
//! A bunch of methods and structures more or less related to resolving macros and //! interface provided by `Resolver` to macro expander. use crate::imports::ImportResolver; use crate::Namespace::*; use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BuiltinMacroState, Determinacy}; use crate::{CrateLint, ParentScope, ResolutionError, Resolver, Scope, ScopeSet, Weak}; use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment, ToNameBinding}; use rustc_ast::{self as ast, NodeId}; use rustc_ast_lowering::ResolverAstLowering; use rustc_ast_pretty::pprust; use rustc_attr::StabilityLevel; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::ptr_key::PtrKey; use rustc_errors::struct_span_err; use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand, SyntaxExtension}; use rustc_expand::compile_declarative_macro; use rustc_expand::expand::{AstFragment, AstFragmentKind, Invocation, InvocationKind}; use rustc_feature::is_builtin_attr_name; use rustc_hir::def::{self, DefKind, NonMacroAttrKind}; use rustc_hir::def_id; use rustc_middle::middle::stability; use rustc_middle::ty; use rustc_session::lint::builtin::UNUSED_MACROS; use rustc_session::Session; use rustc_span::edition::Edition; use rustc_span::hygiene::{self, ExpnData, ExpnId, ExpnKind}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; use rustc_data_structures::sync::Lrc; use rustc_span::hygiene::{AstPass, MacroKind}; use std::cell::Cell; use std::{mem, ptr}; type Res = def::Res<NodeId>; /// Binding produced by a `macro_rules` item. /// Not modularized, can shadow previous `macro_rules` bindings, etc. #[derive(Debug)] pub struct MacroRulesBinding<'a> { crate binding: &'a NameBinding<'a>, /// `macro_rules` scope into which the `macro_rules` item was planted. crate parent_macro_rules_scope: MacroRulesScopeRef<'a>, crate ident: Ident, } /// The scope introduced by a `macro_rules!` macro. 
/// This starts at the macro's definition and ends at the end of the macro's parent /// module (named or unnamed), or even further if it escapes with `#[macro_use]`. /// Some macro invocations need to introduce `macro_rules` scopes too because they /// can potentially expand into macro definitions. #[derive(Copy, Clone, Debug)] pub enum MacroRulesScope<'a> { /// Empty "root" scope at the crate start containing no names. Empty, /// The scope introduced by a `macro_rules!` macro definition. Binding(&'a MacroRulesBinding<'a>), /// The scope introduced by a macro invocation that can potentially /// create a `macro_rules!` macro definition. Invocation(ExpnId), } /// `macro_rules!` scopes are always kept by reference and inside a cell. /// The reason is that we update scopes with value `MacroRulesScope::Invocation(invoc_id)` /// in-place after `invoc_id` gets expanded. /// This helps to avoid uncontrollable growth of `macro_rules!` scope chains, /// which usually grow lineraly with the number of macro invocations /// in a module (including derives) and hurt performance. pub(crate) type MacroRulesScopeRef<'a> = PtrKey<'a, Cell<MacroRulesScope<'a>>>; // Macro namespace is separated into two sub-namespaces, one for bang macros and // one for attribute-like macros (attributes, derives). // We ignore resolutions from one sub-namespace when searching names in scope for another. fn sub_namespace_match(candidate: Option<MacroKind>, requirement: Option<MacroKind>) -> bool { #[derive(PartialEq)] enum SubNS { Bang, AttrLike, } let sub_ns = |kind| match kind { MacroKind::Bang => SubNS::Bang, MacroKind::Attr | MacroKind::Derive => SubNS::AttrLike, }; let candidate = candidate.map(sub_ns); let requirement = requirement.map(sub_ns); // "No specific sub-namespace" means "matches anything" for both requirements and candidates. 
candidate.is_none() || requirement.is_none() || candidate == requirement } // We don't want to format a path using pretty-printing, // `format!("{}", path)`, because that tries to insert // line-breaks and is slow. fn fast_print_path(path: &ast::Path) -> Symbol { if path.segments.len() == 1 { path.segments[0].ident.name } else { let mut path_str = String::with_capacity(64); for (i, segment) in path.segments.iter().enumerate() { if i != 0 { path_str.push_str("::"); } if segment.ident.name != kw::PathRoot { path_str.push_str(&segment.ident.as_str()) } } Symbol::intern(&path_str) } } /// The code common between processing `#![register_tool]` and `#![register_attr]`. fn registered_idents( sess: &Session, attrs: &[ast::Attribute], attr_name: Symbol, descr: &str, ) -> FxHashSet<Ident> { let mut registered = FxHashSet::default(); for attr in sess.filter_by_name(attrs, attr_name) { for nested_meta in attr.meta_item_list().unwrap_or_default() { match nested_meta.ident() { Some(ident) => { if let Some(old_ident) = registered.replace(ident) { let msg = format!("{} `{}` was already registered", descr, ident); sess.struct_span_err(ident.span, &msg) .span_label(old_ident.span, "already registered here") .emit(); } } None => { let msg = format!("`{}` only accepts identifiers", attr_name); let span = nested_meta.span(); sess.struct_span_err(span, &msg).span_label(span, "not an identifier").emit(); } } } } registered } crate fn registered_attrs_and_tools( sess: &Session, attrs: &[ast::Attribute], ) -> (FxHashSet<Ident>, FxHashSet<Ident>) { let registered_attrs = registered_idents(sess, attrs, sym::register_attr, "attribute"); let mut registered_tools = registered_idents(sess, attrs, sym::register_tool, "tool"); // We implicitly add `rustfmt` and `clippy` to known tools, // but it's not an error to register them explicitly. 
let predefined_tools = [sym::clippy, sym::rustfmt]; registered_tools.extend(predefined_tools.iter().cloned().map(Ident::with_dummy_span)); (registered_attrs, registered_tools) } impl<'a> ResolverExpand for Resolver<'a> { fn next_node_id(&mut self) -> NodeId { self.next_node_id() } fn resolve_dollar_crates(&mut self) { hygiene::update_dollar_crate_names(|ctxt| { let ident = Ident::new(kw::DollarCrate, DUMMY_SP.with_ctxt(ctxt)); match self.resolve_crate_root(ident).kind { ModuleKind::Def(.., name) if name != kw::Invalid => name, _ => kw::Crate, } }); } fn visit_ast_fragment_with_placeholders(&mut self, expansion: ExpnId, fragment: &AstFragment) { // Integrate the new AST fragment into all the definition and module structures. // We are inside the `expansion` now, but other parent scope components are still the same. let parent_scope = ParentScope { expansion, ..self.invocation_parent_scopes[&expansion] }; let output_macro_rules_scope = self.build_reduced_graph(fragment, parent_scope); self.output_macro_rules_scopes.insert(expansion, output_macro_rules_scope); parent_scope.module.unexpanded_invocations.borrow_mut().remove(&expansion); } fn register_builtin_macro(&mut self, ident: Ident, ext: SyntaxExtension) { if self.builtin_macros.insert(ident.name, BuiltinMacroState::NotYetSeen(ext)).is_some() { self.session .span_err(ident.span, &format!("built-in macro `{}` was already defined", ident)); } } // Create a new Expansion with a definition site of the provided module, or // a fake empty `#[no_implicit_prelude]` module if no module is provided. 
fn expansion_for_ast_pass( &mut self, call_site: Span, pass: AstPass, features: &[Symbol], parent_module_id: Option<NodeId>, ) -> ExpnId { let expn_id = ExpnId::fresh(Some(ExpnData::allow_unstable( ExpnKind::AstPass(pass), call_site, self.session.edition(), features.into(), None, ))); let parent_scope = if let Some(module_id) = parent_module_id { let parent_def_id = self.local_def_id(module_id); self.definitions.add_parent_module_of_macro_def(expn_id, parent_def_id.to_def_id()); self.module_map[&parent_def_id] } else { self.definitions.add_parent_module_of_macro_def( expn_id, def_id::DefId::local(def_id::CRATE_DEF_INDEX), ); self.empty_module }; self.ast_transform_scopes.insert(expn_id, parent_scope); expn_id } fn resolve_imports(&mut self) { ImportResolver { r: self }.resolve_imports() } fn resolve_macro_invocation( &mut self, invoc: &Invocation, eager_expansion_root: ExpnId, force: bool, ) -> Result<InvocationRes, Indeterminate> { let invoc_id = invoc.expansion_data.id; let parent_scope = match self.invocation_parent_scopes.get(&invoc_id) { Some(parent_scope) => *parent_scope, None => { // If there's no entry in the table, then we are resolving an eagerly expanded // macro, which should inherit its parent scope from its eager expansion root - // the macro that requested this eager expansion. let parent_scope = *self .invocation_parent_scopes .get(&eager_expansion_root) .expect("non-eager expansion without a parent scope"); self.invocation_parent_scopes.insert(invoc_id, parent_scope); parent_scope } }; let (path, kind, derives, after_derive) = match invoc.kind { InvocationKind::Attr { ref attr, ref derives, after_derive, .. } => ( &attr.get_normal_item().path, MacroKind::Attr, self.arenas.alloc_ast_paths(derives), after_derive, ), InvocationKind::Bang { ref mac, .. } => (&mac.path, MacroKind::Bang, &[][..], false), InvocationKind::Derive { ref path, .. } => (path, MacroKind::Derive, &[][..], false), InvocationKind::DeriveContainer { ref derives, .. 
} => { // Block expansion of the container until we resolve all derives in it. // This is required for two reasons: // - Derive helper attributes are in scope for the item to which the `#[derive]` // is applied, so they have to be produced by the container's expansion rather // than by individual derives. // - Derives in the container need to know whether one of them is a built-in `Copy`. // FIXME: Try to avoid repeated resolutions for derives here and in expansion. let mut exts = Vec::new(); let mut helper_attrs = Vec::new(); for path in derives { exts.push( match self.resolve_macro_path( path, Some(MacroKind::Derive), &parent_scope, true, force, ) { Ok((Some(ext), _)) => { let span = path .segments .last() .unwrap() .ident .span .normalize_to_macros_2_0(); helper_attrs.extend( ext.helper_attrs.iter().map(|name| Ident::new(*name, span)), ); if ext.is_derive_copy { self.add_derive_copy(invoc_id); } ext } Ok(_) | Err(Determinacy::Determined) => { self.dummy_ext(MacroKind::Derive) } Err(Determinacy::Undetermined) => return Err(Indeterminate), }, ) } self.helper_attrs.insert(invoc_id, helper_attrs); return Ok(InvocationRes::DeriveContainer(exts)); } }; // Derives are not included when `invocations` are collected, so we have to add them here. 
let parent_scope = &ParentScope { derives, ..parent_scope }; let node_id = self.lint_node_id(eager_expansion_root); let (ext, res) = self.smart_resolve_macro_path(path, kind, parent_scope, node_id, force)?; let span = invoc.span(); invoc_id.set_expn_data(ext.expn_data( parent_scope.expansion, span, fast_print_path(path), res.opt_def_id(), )); if let Res::Def(_, _) = res { if after_derive { self.session.span_err(span, "macro attributes must be placed before `#[derive]`"); } let normal_module_def_id = self.macro_def_scope(invoc_id).normal_ancestor_id; self.definitions.add_parent_module_of_macro_def(invoc_id, normal_module_def_id); } match invoc.fragment_kind { AstFragmentKind::Arms | AstFragmentKind::Fields | AstFragmentKind::FieldPats | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::StructFields | AstFragmentKind::Variants => { if let Res::Def(..) = res { self.session.span_err( span, &format!( "expected an inert attribute, found {} {}", res.article(), res.descr() ), ); return Ok(InvocationRes::Single(self.dummy_ext(kind))); } } _ => {} } Ok(InvocationRes::Single(ext)) } fn check_unused_macros(&mut self) { for (_, &(node_id, span)) in self.unused_macros.iter() { self.lint_buffer.buffer_lint(UNUSED_MACROS, node_id, span, "unused macro definition"); } } fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId { self.invocation_parents .get(&expn_id) .map_or(ast::CRATE_NODE_ID, |id| self.def_id_to_node_id[*id]) } fn has_derive_copy(&self, expn_id: ExpnId) -> bool { self.containers_deriving_copy.contains(&expn_id) } fn add_derive_copy(&mut self, expn_id: ExpnId) { self.containers_deriving_copy.insert(expn_id); } // The function that implements the resolution logic of `#[cfg_accessible(path)]`. // Returns true if the path can certainly be resolved in one of three namespaces, // returns false if the path certainly cannot be resolved in any of the three namespaces. // Returns `Indeterminate` if we cannot give a certain answer yet. 
fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result<bool, Indeterminate> { let span = path.span; let path = &Segment::from_path(path); let parent_scope = self.invocation_parent_scopes[&expn_id]; let mut indeterminate = false; for ns in [TypeNS, ValueNS, MacroNS].iter().copied() { match self.resolve_path(path, Some(ns), &parent_scope, false, span, CrateLint::No) { PathResult::Module(ModuleOrUniformRoot::Module(_)) => return Ok(true), PathResult::NonModule(partial_res) if partial_res.unresolved_segments() == 0 => { return Ok(true); } PathResult::Indeterminate => indeterminate = true, // FIXME: `resolve_path` is not ready to report partially resolved paths // correctly, so we just report an error if the path was reported as unresolved. // This needs to be fixed for `cfg_accessible` to be useful. PathResult::NonModule(..) | PathResult::Failed { .. } => {} PathResult::Module(_) => panic!("unexpected path resolution"), } } if indeterminate { return Err(Indeterminate); } self.session .struct_span_err(span, "not sure whether the path is accessible or not") .span_note(span, "`cfg_accessible` is not fully implemented") .emit(); Ok(false) } } impl<'a> Resolver<'a> { /// Resolve macro path with error reporting and recovery. fn smart_resolve_macro_path( &mut self, path: &ast::Path, kind: MacroKind, parent_scope: &ParentScope<'a>, node_id: NodeId, force: bool, ) -> Result<(Lrc<SyntaxExtension>, Res), Indeterminate> { let (ext, res) = match self.resolve_macro_path(path, Some(kind), parent_scope, true, force) { Ok((Some(ext), res)) => (ext, res), // Use dummy syntax extensions for unresolved macros for better recovery. Ok((None, res)) => (self.dummy_ext(kind), res), Err(Determinacy::Determined) => (self.dummy_ext(kind), Res::Err), Err(Determinacy::Undetermined) => return Err(Indeterminate), }; // Report errors for the resolved macro. 
for segment in &path.segments { if let Some(args) = &segment.args { self.session.span_err(args.span(), "generic arguments in macro path"); } if kind == MacroKind::Attr && segment.ident.as_str().starts_with("rustc") { self.session.span_err( segment.ident.span, "attributes starting with `rustc` are reserved for use by the `rustc` compiler", ); } } match res { Res::Def(DefKind::Macro(_), def_id) => { if let Some(def_id) = def_id.as_local() { self.unused_macros.remove(&def_id); if self.proc_macro_stubs.contains(&def_id) { self.session.span_err( path.span, "can't use a procedural macro from the same crate that defines it", ); } } } Res::NonMacroAttr(..) | Res::Err => {} _ => panic!("expected `DefKind::Macro` or `Res::NonMacroAttr`"), }; self.check_stability_and_deprecation(&ext, path, node_id); Ok(if ext.macro_kind() != kind { let expected = kind.descr_expected(); let path_str = pprust::path_to_string(path); let msg = format!("expected {}, found {} `{}`", expected, res.descr(), path_str); self.session .struct_span_err(path.span, &msg) .span_label(path.span, format!("not {} {}", kind.article(), expected)) .emit(); // Use dummy syntax extensions for unexpected macro kinds for better recovery. 
(self.dummy_ext(kind), Res::Err) } else { (ext, res) }) } pub fn resolve_macro_path( &mut self, path: &ast::Path, kind: Option<MacroKind>, parent_scope: &ParentScope<'a>, trace: bool, force: bool, ) -> Result<(Option<Lrc<SyntaxExtension>>, Res), Determinacy> { let path_span = path.span; let mut path = Segment::from_path(path); // Possibly apply the macro helper hack if kind == Some(MacroKind::Bang) && path.len() == 1 && path[0].ident.span.ctxt().outer_expn_data().local_inner_macros { let root = Ident::new(kw::DollarCrate, path[0].ident.span); path.insert(0, Segment::from_ident(root)); } let res = if path.len() > 1 { let res = match self.resolve_path( &path, Some(MacroNS), parent_scope, false, path_span, CrateLint::No, ) { PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => { Ok(path_res.base_res()) } PathResult::Indeterminate if !force => return Err(Determinacy::Undetermined), PathResult::NonModule(..) | PathResult::Indeterminate | PathResult::Failed { .. } => Err(Determinacy::Determined), PathResult::Module(..) 
=> unreachable!(), }; if trace { let kind = kind.expect("macro kind must be specified if tracing is enabled"); self.multi_segment_macro_resolutions.push(( path, path_span, kind, *parent_scope, res.ok(), )); } self.prohibit_imported_non_macro_attrs(None, res.ok(), path_span); res } else { let scope_set = kind.map_or(ScopeSet::All(MacroNS, false), ScopeSet::Macro); let binding = self.early_resolve_ident_in_lexical_scope( path[0].ident, scope_set, parent_scope, false, force, path_span, ); if let Err(Determinacy::Undetermined) = binding { return Err(Determinacy::Undetermined); } if trace { let kind = kind.expect("macro kind must be specified if tracing is enabled"); self.single_segment_macro_resolutions.push(( path[0].ident, kind, *parent_scope, binding.ok(), )); } let res = binding.map(|binding| binding.res()); self.prohibit_imported_non_macro_attrs(binding.ok(), res.ok(), path_span); res }; res.map(|res| (self.get_macro(res), res)) } // Resolve an identifier in lexical scope. // This is a variation of `fn resolve_ident_in_lexical_scope` that can be run during // expansion and import resolution (perhaps they can be merged in the future). // The function is used for resolving initial segments of macro paths (e.g., `foo` in // `foo::bar!(); or `foo!();`) and also for import paths on 2018 edition. crate fn early_resolve_ident_in_lexical_scope( &mut self, orig_ident: Ident, scope_set: ScopeSet, parent_scope: &ParentScope<'a>, record_used: bool, force: bool, path_span: Span, ) -> Result<&'a NameBinding<'a>, Determinacy> { bitflags::bitflags! { struct Flags: u8 { const MACRO_RULES = 1 << 0; const MODULE = 1 << 1; const DERIVE_HELPER_COMPAT = 1 << 2; const MISC_SUGGEST_CRATE = 1 << 3; const MISC_SUGGEST_SELF = 1 << 4; const MISC_FROM_PRELUDE = 1 << 5; } } assert!(force || !record_used); // `record_used` implies `force` // Make sure `self`, `super` etc produce an error when passed to here. 
if orig_ident.is_path_segment_keyword() { return Err(Determinacy::Determined); } let (ns, macro_kind, is_import) = match scope_set { ScopeSet::All(ns, is_import) => (ns, None, is_import), ScopeSet::AbsolutePath(ns) => (ns, None, false), ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false), }; // This is *the* result, resolution from the scope closest to the resolved identifier. // However, sometimes this result is "weak" because it comes from a glob import or // a macro expansion, and in this case it cannot shadow names from outer scopes, e.g. // mod m { ... } // solution in outer scope // { // use prefix::*; // imports another `m` - innermost solution // // weak, cannot shadow the outer `m`, need to report ambiguity error // m::mac!(); // } // So we have to save the innermost solution and continue searching in outer scopes // to detect potential ambiguities. let mut innermost_result: Option<(&NameBinding<'_>, Flags)> = None; let mut determinacy = Determinacy::Determined; // Go through all the scopes and try to resolve the name. 
let break_result = self.visit_scopes( scope_set, parent_scope, orig_ident, |this, scope, use_prelude, ident| { let ok = |res, span, arenas| { Ok(( (res, ty::Visibility::Public, span, ExpnId::root()).to_name_binding(arenas), Flags::empty(), )) }; let result = match scope { Scope::DeriveHelpers(expn_id) => { if let Some(attr) = this .helper_attrs .get(&expn_id) .and_then(|attrs| attrs.iter().rfind(|i| ident == **i)) { let binding = ( Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper), ty::Visibility::Public, attr.span, expn_id, ) .to_name_binding(this.arenas); Ok((binding, Flags::empty())) } else { Err(Determinacy::Determined) } } Scope::DeriveHelpersCompat => { let mut result = Err(Determinacy::Determined); for derive in parent_scope.derives { let parent_scope = &ParentScope { derives: &[], ..*parent_scope }; match this.resolve_macro_path( derive, Some(MacroKind::Derive), parent_scope, true, force, ) { Ok((Some(ext), _)) => { if ext.helper_attrs.contains(&ident.name) { let binding = ( Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper), ty::Visibility::Public, derive.span, ExpnId::root(), ) .to_name_binding(this.arenas); result = Ok((binding, Flags::DERIVE_HELPER_COMPAT)); break; } } Ok(_) | Err(Determinacy::Determined) => {} Err(Determinacy::Undetermined) => { result = Err(Determinacy::Undetermined) } } } result } Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { MacroRulesScope::Binding(macro_rules_binding) if ident == macro_rules_binding.ident => { Ok((macro_rules_binding.binding, Flags::MACRO_RULES)) } MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined), _ => Err(Determinacy::Determined), }, Scope::CrateRoot => { let root_ident = Ident::new(kw::PathRoot, ident.span); let root_module = this.resolve_crate_root(root_ident); let binding = this.resolve_ident_in_module_ext( ModuleOrUniformRoot::Module(root_module), ident, ns, parent_scope, record_used, path_span, ); match binding { Ok(binding) => Ok((binding, Flags::MODULE | 
Flags::MISC_SUGGEST_CRATE)), Err((Determinacy::Undetermined, Weak::No)) => { return Some(Err(Determinacy::determined(force))); } Err((Determinacy::Undetermined, Weak::Yes)) => { Err(Determinacy::Undetermined) } Err((Determinacy::Determined, _)) => Err(Determinacy::Determined), } } Scope::Module(module) => { let adjusted_parent_scope = &ParentScope { module, ..*parent_scope }; let binding = this.resolve_ident_in_module_unadjusted_ext( ModuleOrUniformRoot::Module(module), ident, ns, adjusted_parent_scope, true, record_used, path_span, ); match binding { Ok(binding) => { let misc_flags = if ptr::eq(module, this.graph_root) { Flags::MISC_SUGGEST_CRATE } else if module.is_normal() { Flags::MISC_SUGGEST_SELF } else { Flags::empty() }; Ok((binding, Flags::MODULE | misc_flags)) } Err((Determinacy::Undetermined, Weak::No)) => { return Some(Err(Determinacy::determined(force))); } Err((Determinacy::Undetermined, Weak::Yes)) => { Err(Determinacy::Undetermined) } Err((Determinacy::Determined, _)) => Err(Determinacy::Determined), } } Scope::RegisteredAttrs => match this.registered_attrs.get(&ident).cloned() { Some(ident) => ok( Res::NonMacroAttr(NonMacroAttrKind::Registered), ident.span, this.arenas, ), None => Err(Determinacy::Determined), }, Scope::MacroUsePrelude => { match this.macro_use_prelude.get(&ident.name).cloned() { Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)), None => Err(Determinacy::determined( this.graph_root.unexpanded_invocations.borrow().is_empty(), )), } } Scope::BuiltinAttrs => { if is_builtin_attr_name(ident.name) { ok(Res::NonMacroAttr(NonMacroAttrKind::Builtin), DUMMY_SP, this.arenas) } else { Err(Determinacy::Determined) } } Scope::ExternPrelude => match this.extern_prelude_get(ident, !record_used) { Some(binding) => Ok((binding, Flags::empty())), None => Err(Determinacy::determined( this.graph_root.unexpanded_invocations.borrow().is_empty(), )), }, Scope::ToolPrelude => match this.registered_tools.get(&ident).cloned() { Some(ident) => 
ok(Res::ToolMod, ident.span, this.arenas), None => Err(Determinacy::Determined), }, Scope::StdLibPrelude => { let mut result = Err(Determinacy::Determined); if let Some(prelude) = this.prelude { if let Ok(binding) = this.resolve_ident_in_module_unadjusted( ModuleOrUniformRoot::Module(prelude), ident, ns, parent_scope, false, path_span, ) { if use_prelude || this.is_builtin_macro(binding.res()) { result = Ok((binding, Flags::MISC_FROM_PRELUDE)); } } } result } Scope::BuiltinTypes => { match this.primitive_type_table.primitive_types.get(&ident.name).cloned() { Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas), None => Err(Determinacy::Determined), } } }; match result { Ok((binding, flags)) if sub_namespace_match(binding.macro_kind(), macro_kind) => { if !record_used { return Some(Ok(binding)); } if let Some((innermost_binding, innermost_flags)) = innermost_result { // Found another solution, if the first one was "weak", report an error. let (res, innermost_res) = (binding.res(), innermost_binding.res()); if res != innermost_res { let builtin = Res::NonMacroAttr(NonMacroAttrKind::Builtin); let is_derive_helper_compat = |res, flags: Flags| { res == Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper) && flags.contains(Flags::DERIVE_HELPER_COMPAT) }; let ambiguity_error_kind = if is_import { Some(AmbiguityKind::Import) } else if innermost_res == builtin || res == builtin { Some(AmbiguityKind::BuiltinAttr) } else if is_derive_helper_compat(innermost_res, innermost_flags) || is_derive_helper_compat(res, flags) { Some(AmbiguityKind::DeriveHelper) } else if innermost_flags.contains(Flags::MACRO_RULES) && flags.contains(Flags::MODULE) && !this.disambiguate_macro_rules_vs_modularized( innermost_binding, binding, ) || flags.contains(Flags::MACRO_RULES) && innermost_flags.contains(Flags::MODULE) && !this.disambiguate_macro_rules_vs_modularized( binding, innermost_binding, ) { Some(AmbiguityKind::MacroRulesVsModularized) } else if innermost_binding.is_glob_import() { 
Some(AmbiguityKind::GlobVsOuter) } else if innermost_binding .may_appear_after(parent_scope.expansion, binding) { Some(AmbiguityKind::MoreExpandedVsOuter) } else { None }; if let Some(kind) = ambiguity_error_kind { let misc = |f: Flags| { if f.contains(Flags::MISC_SUGGEST_CRATE) { AmbiguityErrorMisc::SuggestCrate } else if f.contains(Flags::MISC_SUGGEST_SELF) { AmbiguityErrorMisc::SuggestSelf } else if f.contains(Flags::MISC_FROM_PRELUDE) { AmbiguityErrorMisc::FromPrelude } else { AmbiguityErrorMisc::None } }; this.ambiguity_errors.push(AmbiguityError { kind, ident: orig_ident, b1: innermost_binding, b2: binding, misc1: misc(innermost_flags), misc2: misc(flags), }); return Some(Ok(innermost_binding)); } } } else { // Found the first solution. innermost_result = Some((binding, flags)); } } Ok(..) | Err(Determinacy::Determined) => {} Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined, } None }, ); if let Some(break_result) = break_result { return break_result; } // The first found solution was the only one, return it. if let Some((binding, _)) = innermost_result { return Ok(binding); } Err(Determinacy::determined(determinacy == Determinacy::Determined || force)) } crate fn finalize_macro_resolutions(&mut self) { let check_consistency = |this: &mut Self, path: &[Segment], span, kind: MacroKind, initial_res: Option<Res>, res: Res| { if let Some(initial_res) = initial_res { if res != initial_res { // Make sure compilation does not succeed if preferred macro resolution // has changed after the macro had been expanded. In theory all such // situations should be reported as errors, so this is a bug. this.session.delay_span_bug(span, "inconsistent resolution for a macro"); } } else { // It's possible that the macro was unresolved (indeterminate) and silently // expanded into a dummy fragment for recovery during expansion. // Now, post-expansion, the resolution may succeed, but we can't change the // past and need to report an error. 
// However, non-speculative `resolve_path` can successfully return private items // even if speculative `resolve_path` returned nothing previously, so we skip this // less informative error if the privacy error is reported elsewhere. if this.privacy_errors.is_empty() { let msg = format!( "cannot determine resolution for the {} `{}`", kind.descr(), Segment::names_to_string(path) ); let msg_note = "import resolution is stuck, try simplifying macro imports"; this.session.struct_span_err(span, &msg).note(msg_note).emit(); } } }; let macro_resolutions = mem::take(&mut self.multi_segment_macro_resolutions); for (mut path, path_span, kind, parent_scope, initial_res) in macro_resolutions { // FIXME: Path resolution will ICE if segment IDs present. for seg in &mut path { seg.id = None; } match self.resolve_path( &path, Some(MacroNS), &parent_scope, true, path_span, CrateLint::No, ) { PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => { let res = path_res.base_res(); check_consistency(self, &path, path_span, kind, initial_res, res); } path_res @ PathResult::NonModule(..) | path_res @ PathResult::Failed { .. } => { let (span, label) = if let PathResult::Failed { span, label, .. } = path_res { (span, label) } else { ( path_span, format!( "partially resolved path in {} {}", kind.article(), kind.descr() ), ) }; self.report_error( span, ResolutionError::FailedToResolve { label, suggestion: None }, ); } PathResult::Module(..) 
| PathResult::Indeterminate => unreachable!(), } } let macro_resolutions = mem::take(&mut self.single_segment_macro_resolutions); for (ident, kind, parent_scope, initial_binding) in macro_resolutions { match self.early_resolve_ident_in_lexical_scope( ident, ScopeSet::Macro(kind), &parent_scope, true, true, ident.span, ) { Ok(binding) => { let initial_res = initial_binding.map(|initial_binding| { self.record_use(ident, MacroNS, initial_binding, false); initial_binding.res() }); let res = binding.res(); let seg = Segment::from_ident(ident); check_consistency(self, &[seg], ident.span, kind, initial_res, res); } Err(..) => { let expected = kind.descr_expected(); let msg = format!("cannot find {} `{}` in this scope", expected, ident); let mut err = self.session.struct_span_err(ident.span, &msg); self.unresolved_macro_suggestions(&mut err, kind, &parent_scope, ident); err.emit(); } } } let builtin_attrs = mem::take(&mut self.builtin_attrs); for (ident, parent_scope) in builtin_attrs { let _ = self.early_resolve_ident_in_lexical_scope( ident, ScopeSet::Macro(MacroKind::Attr), &parent_scope, true, true, ident.span, ); } } fn check_stability_and_deprecation( &mut self, ext: &SyntaxExtension, path: &ast::Path, node_id: NodeId, ) { let span = path.span; if let Some(stability) = &ext.stability { if let StabilityLevel::Unstable { reason, issue, is_soft } = stability.level { let feature = stability.feature; if !self.active_features.contains(&feature) && !span.allows_unstable(feature) { let lint_buffer = &mut self.lint_buffer; let soft_handler = |lint, span, msg: &_| lint_buffer.buffer_lint(lint, node_id, span, msg); stability::report_unstable( self.session, feature, reason, issue, is_soft, span, soft_handler, ); } } } if let Some(depr) = &ext.deprecation { let path = pprust::path_to_string(&path); let (message, lint) = stability::deprecation_message(depr, "macro", &path); stability::early_report_deprecation( &mut self.lint_buffer, &message, depr.suggestion, lint, span, node_id, 
); } } fn prohibit_imported_non_macro_attrs( &self, binding: Option<&'a NameBinding<'a>>, res: Option<Res>, span: Span, ) { if let Some(Res::NonMacroAttr(kind)) = res { if kind != NonMacroAttrKind::Tool && binding.map_or(true, |b| b.is_import()) { let msg = format!("cannot use {} {} through an import", kind.article(), kind.descr()); let mut err = self.session.struct_span_err(span, &msg); if let Some(binding) = binding { err.span_note(binding.span, &format!("the {} imported here", kind.descr())); } err.emit(); } } } crate fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) { // Reserve some names that are not quite covered by the general check // performed on `Resolver::builtin_attrs`. if ident.name == sym::cfg || ident.name == sym::cfg_attr || ident.name == sym::derive { let macro_kind = self.get_macro(res).map(|ext| ext.macro_kind()); if macro_kind.is_some() && sub_namespace_match(macro_kind, Some(MacroKind::Attr)) { self.session.span_err( ident.span, &format!("name `{}` is reserved in attribute namespace", ident), ); } } } /// Compile the macro into a `SyntaxExtension` and possibly replace /// its expander to a pre-defined one for built-in macros. crate fn compile_macro(&mut self, item: &ast::Item, edition: Edition) -> SyntaxExtension { let mut result = compile_declarative_macro( &self.session, self.session.features_untracked(), item, edition, ); if result.is_builtin { // The macro was marked with `#[rustc_builtin_macro]`. if let Some(builtin_macro) = self.builtin_macros.get_mut(&item.ident.name) { // The macro is a built-in, replace its expander function // while still taking everything else from the source code. // If we already loaded this builtin macro, give a better error message than 'no such builtin macro'. 
match mem::replace(builtin_macro, BuiltinMacroState::AlreadySeen(item.span)) { BuiltinMacroState::NotYetSeen(ext) => result.kind = ext.kind, BuiltinMacroState::AlreadySeen(span) => { struct_span_err!( self.session, item.span, E0773, "attempted to define built-in macro more than once" ) .span_note(span, "previously defined here") .emit(); } } } else { let msg = format!("cannot find a built-in macro with name `{}`", item.ident); self.session.span_err(item.span, &msg); } } result } }
45.098832
117
0.490925
9ba029c5931a895a722c241952672a0508a46a12
2,595
extern crate rusoto_mock; use crate::generated::{CloudWatch, CloudWatchClient, Dimension, MetricDatum, PutMetricDataInput}; use self::rusoto_mock::*; use rusoto_core::param::Params; use rusoto_core::signature::SignedRequest; use rusoto_core::signature::SignedRequestPayload; use rusoto_core::Region; use serde_urlencoded; #[test] fn should_serialize_complex_metric_data_params() { let mock = MockRequestDispatcher::with_status(200) .with_body("") .with_request_checker(|request: &SignedRequest| { assert_eq!("POST", request.method); assert_eq!("/", request.path); if let Some(SignedRequestPayload::Buffer(ref buffer)) = request.payload { let params: Params = serde_urlencoded::from_bytes(buffer).unwrap(); assert_eq!( params.get("Namespace"), Some(&Some("TestNamespace".to_owned())) ); assert_eq!( params.get("MetricData.member.1.MetricName"), Some(&Some("buffers".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Unit"), Some(&Some("Bytes".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Value"), Some(&Some("1".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Dimensions.member.1.Name"), Some(&Some("foo".to_owned())) ); assert_eq!( params.get("MetricData.member.1.Dimensions.member.1.Value"), Some(&Some("bar".to_owned())) ); } else { panic!("Unexpected request.payload: {:?}", request.payload); } }); let metric_data = vec![MetricDatum { dimensions: Some(vec![Dimension { name: "foo".to_string(), value: "bar".to_string(), }]), metric_name: "buffers".to_string(), statistic_values: None, timestamp: None, unit: Some("Bytes".to_string()), value: Some(1.0), ..Default::default() }]; let request = PutMetricDataInput { namespace: "TestNamespace".to_string(), metric_data: metric_data, }; let client = CloudWatchClient::new_with(mock, MockCredentialsProvider, Region::UsEast1); let response = client.put_metric_data(request).sync().unwrap(); println!("{:#?}", response); }
37.071429
97
0.544123
e515543260851cf10e7030f2aa76ac991eed8a92
24,969
//! Spilling pass. //! //! The spilling pass is the first to run after the liveness analysis. Its primary function is to //! ensure that the register pressure never exceeds the number of available registers by moving //! some SSA values to spill slots on the stack. This is encoded in the affinity of the value's //! live range. //! //! Some instruction operand constraints may require additional registers to resolve. Since this //! can cause spilling, the spilling pass is also responsible for resolving those constraints by //! inserting copies. The extra constraints are: //! //! 1. A value used by a tied operand must be killed by the instruction. This is resolved by //! inserting a copy to a temporary value when necessary. //! 2. When the same value is used more than once by an instruction, the operand constraints must //! be compatible. Otherwise, the value must be copied into a new register for some of the //! operands. use crate::cursor::{Cursor, EncCursor}; use crate::dominator_tree::DominatorTree; use crate::ir::{ArgumentLoc, Block, Function, Inst, InstBuilder, SigRef, Value, ValueLoc}; use crate::isa::registers::{RegClass, RegClassIndex, RegClassMask, RegUnit}; use crate::isa::{ConstraintKind, EncInfo, RecipeConstraints, RegInfo, TargetIsa}; use crate::regalloc::affinity::Affinity; use crate::regalloc::live_value_tracker::{LiveValue, LiveValueTracker}; use crate::regalloc::liveness::Liveness; use crate::regalloc::pressure::Pressure; use crate::regalloc::virtregs::VirtRegs; use crate::timing; use crate::topo_order::TopoOrder; use alloc::vec::Vec; use core::fmt; use log::debug; /// Return a top-level register class which contains `unit`. 
fn toprc_containing_regunit(unit: RegUnit, reginfo: &RegInfo) -> RegClass { let bank = reginfo.bank_containing_regunit(unit).unwrap(); reginfo.classes[bank.first_toprc..(bank.first_toprc + bank.num_toprcs)] .iter() .find(|&rc| rc.contains(unit)) .expect("reg unit should be in a toprc") } /// Persistent data structures for the spilling pass. pub struct Spilling { spills: Vec<Value>, reg_uses: Vec<RegUse>, } /// Context data structure that gets instantiated once per pass. struct Context<'a> { // Current instruction as well as reference to function and ISA. cur: EncCursor<'a>, // Cached ISA information. reginfo: RegInfo, encinfo: EncInfo, // References to contextual data structures we need. domtree: &'a DominatorTree, liveness: &'a mut Liveness, virtregs: &'a VirtRegs, topo: &'a mut TopoOrder, // Current register pressure. pressure: Pressure, // Values spilled for the current instruction. These values have already been removed from the // pressure tracker, but they are still present in the live value tracker and their affinity // hasn't been changed yet. spills: &'a mut Vec<Value>, // Uses of register values in the current instruction. reg_uses: &'a mut Vec<RegUse>, } impl Spilling { /// Create a new spilling data structure. pub fn new() -> Self { Self { spills: Vec::new(), reg_uses: Vec::new(), } } /// Clear all data structures in this spilling pass. pub fn clear(&mut self) { self.spills.clear(); self.reg_uses.clear(); } /// Run the spilling algorithm over `func`. 
pub fn run( &mut self, isa: &dyn TargetIsa, func: &mut Function, domtree: &DominatorTree, liveness: &mut Liveness, virtregs: &VirtRegs, topo: &mut TopoOrder, tracker: &mut LiveValueTracker, ) { let _tt = timing::ra_spilling(); debug!("Spilling for:\n{}", func.display(isa)); let reginfo = isa.register_info(); let usable_regs = isa.allocatable_registers(func); let mut ctx = Context { cur: EncCursor::new(func, isa), reginfo: isa.register_info(), encinfo: isa.encoding_info(), domtree, liveness, virtregs, topo, pressure: Pressure::new(&reginfo, &usable_regs), spills: &mut self.spills, reg_uses: &mut self.reg_uses, }; ctx.run(tracker) } } impl<'a> Context<'a> { fn run(&mut self, tracker: &mut LiveValueTracker) { self.topo.reset(self.cur.func.layout.blocks()); while let Some(block) = self.topo.next(&self.cur.func.layout, self.domtree) { self.visit_block(block, tracker); } } fn visit_block(&mut self, block: Block, tracker: &mut LiveValueTracker) { debug!("Spilling {}:", block); self.cur.goto_top(block); self.visit_block_header(block, tracker); tracker.drop_dead_params(); self.process_spills(tracker); while let Some(inst) = self.cur.next_inst() { if !self.cur.func.dfg[inst].opcode().is_ghost() { self.visit_inst(inst, block, tracker); } else { let (_throughs, kills) = tracker.process_ghost(inst); self.free_regs(kills); } tracker.drop_dead(inst); self.process_spills(tracker); } } // Take all live registers in `regs` from the pressure set. // This doesn't cause any spilling, it is assumed there are enough registers. fn take_live_regs(&mut self, regs: &[LiveValue]) { for lv in regs { if !lv.is_dead { if let Affinity::Reg(rci) = lv.affinity { let rc = self.reginfo.rc(rci); self.pressure.take(rc); } } } } // Free all registers in `kills` from the pressure set. 
fn free_regs(&mut self, kills: &[LiveValue]) { for lv in kills { if let Affinity::Reg(rci) = lv.affinity { if !self.spills.contains(&lv.value) { let rc = self.reginfo.rc(rci); self.pressure.free(rc); } } } } // Free all dead registers in `regs` from the pressure set. fn free_dead_regs(&mut self, regs: &[LiveValue]) { for lv in regs { if lv.is_dead { if let Affinity::Reg(rci) = lv.affinity { if !self.spills.contains(&lv.value) { let rc = self.reginfo.rc(rci); self.pressure.free(rc); } } } } } fn visit_block_header(&mut self, block: Block, tracker: &mut LiveValueTracker) { let (liveins, params) = tracker.block_top( block, &self.cur.func.dfg, self.liveness, &self.cur.func.layout, self.domtree, ); // Count the live-in registers. These should already fit in registers; they did at the // dominator. self.pressure.reset(); self.take_live_regs(liveins); // An block can have an arbitrary (up to 2^16...) number of parameters, so they are not // guaranteed to fit in registers. for lv in params { if let Affinity::Reg(rci) = lv.affinity { let rc = self.reginfo.rc(rci); 'try_take: while let Err(mask) = self.pressure.take_transient(rc) { debug!("Need {} reg for block param {}", rc, lv.value); match self.spill_candidate(mask, liveins) { Some(cand) => { debug!( "Spilling live-in {} to make room for {} block param {}", cand, rc, lv.value ); self.spill_reg(cand); } None => { // We can't spill any of the live-in registers, so we have to spill an // block argument. Since the current spill metric would consider all the // block arguments equal, just spill the present register. debug!("Spilling {} block argument {}", rc, lv.value); // Since `spill_reg` will free a register, add the current one here. self.pressure.take(rc); self.spill_reg(lv.value); break 'try_take; } } } } } // The transient pressure counts for the block arguments are accurate. Just preserve them. 
self.pressure.preserve_transient(); self.free_dead_regs(params); } fn visit_inst(&mut self, inst: Inst, block: Block, tracker: &mut LiveValueTracker) { debug!("Inst {}, {}", self.cur.display_inst(inst), self.pressure); debug_assert_eq!(self.cur.current_inst(), Some(inst)); debug_assert_eq!(self.cur.current_block(), Some(block)); let constraints = self .encinfo .operand_constraints(self.cur.func.encodings[inst]); // We may need to resolve register constraints if there are any noteworthy uses. debug_assert!(self.reg_uses.is_empty()); self.collect_reg_uses(inst, block, constraints); // Calls usually have fixed register uses. let call_sig = self.cur.func.dfg.call_signature(inst); if let Some(sig) = call_sig { self.collect_abi_reg_uses(inst, sig); } if !self.reg_uses.is_empty() { self.process_reg_uses(inst, tracker); } // Update the live value tracker with this instruction. let (throughs, kills, defs) = tracker.process_inst(inst, &self.cur.func.dfg, self.liveness); // Remove kills from the pressure tracker. self.free_regs(kills); // If inst is a call, spill all register values that are live across the call. // This means that we don't currently take advantage of callee-saved registers. // TODO: Be more sophisticated. let opcode = self.cur.func.dfg[inst].opcode(); if call_sig.is_some() || opcode == crate::ir::Opcode::X86ElfTlsGetAddr || opcode == crate::ir::Opcode::X86MachoTlsGetAddr { for lv in throughs { if lv.affinity.is_reg() && !self.spills.contains(&lv.value) { self.spill_reg(lv.value); } } } // Make sure we have enough registers for the register defs. // Dead defs are included here. They need a register too. // No need to process call return values, they are in fixed registers. if let Some(constraints) = constraints { for op in constraints.outs { if op.kind != ConstraintKind::Stack { // Add register def to pressure, spill if needed. 
while let Err(mask) = self.pressure.take_transient(op.regclass) { debug!("Need {} reg from {} throughs", op.regclass, throughs.len()); match self.spill_candidate(mask, throughs) { Some(cand) => self.spill_reg(cand), None => panic!( "Ran out of {} registers for {}", op.regclass, self.cur.display_inst(inst) ), } } } } self.pressure.reset_transient(); } // Restore pressure state, compute pressure with affinities from `defs`. // Exclude dead defs. Includes call return values. // This won't cause spilling. self.take_live_regs(defs); } // Collect register uses that are noteworthy in one of the following ways: // // 1. It's a fixed register constraint. // 2. It's a use of a spilled value. // 3. It's a tied register constraint and the value isn't killed. // // We are assuming here that if a value is used both by a fixed register operand and a register // class operand, they two are compatible. We are also assuming that two register class // operands are always compatible. fn collect_reg_uses( &mut self, inst: Inst, block: Block, constraints: Option<&RecipeConstraints>, ) { let args = self.cur.func.dfg.inst_args(inst); let num_fixed_ins = if let Some(constraints) = constraints { for (idx, (op, &arg)) in constraints.ins.iter().zip(args).enumerate() { let mut reguse = RegUse::new(arg, idx, op.regclass.into()); let lr = &self.liveness[arg]; match op.kind { ConstraintKind::Stack => continue, ConstraintKind::FixedReg(_) => reguse.fixed = true, ConstraintKind::Tied(_) => { // A tied operand must kill the used value. reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout); } ConstraintKind::FixedTied(_) => { reguse.fixed = true; reguse.tied = !lr.killed_at(inst, block, &self.cur.func.layout); } ConstraintKind::Reg => {} } if lr.affinity.is_stack() { reguse.spilled = true; } // Only collect the interesting register uses. 
if reguse.fixed || reguse.tied || reguse.spilled { debug!(" reguse: {}", reguse); self.reg_uses.push(reguse); } } constraints.ins.len() } else { // A non-ghost instruction with no constraints can't have any // fixed operands. 0 }; // Similarly, for return instructions, collect uses of ABI-defined // return values. if self.cur.func.dfg[inst].opcode().is_return() { debug_assert_eq!( self.cur.func.dfg.inst_variable_args(inst).len(), self.cur.func.signature.returns.len(), "The non-fixed arguments in a return should follow the function's signature." ); for (ret_idx, (ret, &arg)) in self.cur.func.signature.returns.iter().zip(args).enumerate() { let idx = num_fixed_ins + ret_idx; let unit = match ret.location { ArgumentLoc::Unassigned => { panic!("function return signature should be legalized") } ArgumentLoc::Reg(unit) => unit, ArgumentLoc::Stack(_) => continue, }; let toprc = toprc_containing_regunit(unit, &self.reginfo); let mut reguse = RegUse::new(arg, idx, toprc.into()); reguse.fixed = true; debug!(" reguse: {}", reguse); self.reg_uses.push(reguse); } } } // Collect register uses from the ABI input constraints. fn collect_abi_reg_uses(&mut self, inst: Inst, sig: SigRef) { let num_fixed_args = self.cur.func.dfg[inst] .opcode() .constraints() .num_fixed_value_arguments(); let args = self.cur.func.dfg.inst_variable_args(inst); for (idx, (abi, &arg)) in self.cur.func.dfg.signatures[sig] .params .iter() .zip(args) .enumerate() { if abi.location.is_reg() { let (rci, spilled) = match self.liveness[arg].affinity { Affinity::Reg(rci) => (rci, false), Affinity::Stack => ( self.cur.isa.regclass_for_abi_type(abi.value_type).into(), true, ), Affinity::Unassigned => panic!("Missing affinity for {}", arg), }; let mut reguse = RegUse::new(arg, num_fixed_args + idx, rci); reguse.fixed = true; reguse.spilled = spilled; self.reg_uses.push(reguse); } } } // Process multiple register uses to resolve potential conflicts. 
// // Look for multiple uses of the same value in `self.reg_uses` and insert copies as necessary. // Trigger spilling if any of the temporaries cause the register pressure to become too high. // // Leave `self.reg_uses` empty. fn process_reg_uses(&mut self, inst: Inst, tracker: &LiveValueTracker) { // We're looking for multiple uses of the same value, so start by sorting by value. The // secondary `opidx` key makes it possible to use an unstable (non-allocating) sort. self.reg_uses.sort_unstable_by_key(|u| (u.value, u.opidx)); self.cur.use_srcloc(inst); for i in 0..self.reg_uses.len() { let ru = self.reg_uses[i]; // Do we need to insert a copy for this use? let need_copy = if ru.tied { true } else if ru.fixed { // This is a fixed register use which doesn't necessarily require a copy. // Make a copy only if this is not the first use of the value. self.reg_uses .get(i.wrapping_sub(1)) .map_or(false, |ru2| ru2.value == ru.value) } else { false }; if need_copy { let copy = self.insert_copy(ru.value, ru.rci); self.cur.func.dfg.inst_args_mut(inst)[ru.opidx as usize] = copy; } // Even if we don't insert a copy, we may need to account for register pressure for the // reload pass. if need_copy || ru.spilled { let rc = self.reginfo.rc(ru.rci); while let Err(mask) = self.pressure.take_transient(rc) { debug!("Copy of {} reg causes spill", rc); // Spill a live register that is *not* used by the current instruction. // Spilling a use wouldn't help. // // Do allow spilling of block arguments on branches. This is safe since we spill // the whole virtual register which includes the matching block parameter value // at the branch destination. It is also necessary since there can be // arbitrarily many block arguments. 
match { let args = if self.cur.func.dfg[inst].opcode().is_branch() { self.cur.func.dfg.inst_fixed_args(inst) } else { self.cur.func.dfg.inst_args(inst) }; self.spill_candidate( mask, tracker.live().iter().filter(|lv| !args.contains(&lv.value)), ) } { Some(cand) => self.spill_reg(cand), None => panic!( "Ran out of {} registers when inserting copy before {}", rc, self.cur.display_inst(inst) ), } } } } self.pressure.reset_transient(); self.reg_uses.clear() } // Find a spill candidate from `candidates` whose top-level register class is in `mask`. fn spill_candidate<'ii, II>(&self, mask: RegClassMask, candidates: II) -> Option<Value> where II: IntoIterator<Item = &'ii LiveValue>, { // Find the best viable spill candidate. // // The very simple strategy implemented here is to spill the value with the earliest def in // the reverse post-order. This strategy depends on a good reload pass to generate good // code. // // We know that all candidate defs dominate the current instruction, so one of them will // dominate the others. That is the earliest def. candidates .into_iter() .filter_map(|lv| { // Viable candidates are registers in one of the `mask` classes, and not already in // the spill set. if let Affinity::Reg(rci) = lv.affinity { let rc = self.reginfo.rc(rci); if (mask & (1 << rc.toprc)) != 0 && !self.spills.contains(&lv.value) { // Here, `lv` is a viable spill candidate. return Some(lv.value); } } None }) .min_by(|&a, &b| { // Find the minimum candidate according to the RPO of their defs. self.domtree.rpo_cmp( self.cur.func.dfg.value_def(a), self.cur.func.dfg.value_def(b), &self.cur.func.layout, ) }) } /// Spill `value` immediately by /// /// 1. Changing its affinity to `Stack` which marks the spill. /// 2. Removing the value from the pressure tracker. /// 3. Adding the value to `self.spills` for later reference by `process_spills`. /// /// Note that this does not update the cached affinity in the live value tracker. Call /// `process_spills` to do that. 
fn spill_reg(&mut self, value: Value) { if let Affinity::Reg(rci) = self.liveness.spill(value) { let rc = self.reginfo.rc(rci); self.pressure.free(rc); self.spills.push(value); debug!("Spilled {}:{} -> {}", value, rc, self.pressure); } else { panic!("Cannot spill {} that was already on the stack", value); } // Assign a spill slot for the whole virtual register. let ss = self .cur .func .stack_slots .make_spill_slot(self.cur.func.dfg.value_type(value)); for &v in self.virtregs.congruence_class(&value) { self.liveness.spill(v); self.cur.func.locations[v] = ValueLoc::Stack(ss); } } /// Process any pending spills in the `self.spills` vector. /// /// It is assumed that spills are removed from the pressure tracker immediately, see /// `spill_reg` above. /// /// We also need to update the live range affinity and remove spilled values from the live /// value tracker. fn process_spills(&mut self, tracker: &mut LiveValueTracker) { if !self.spills.is_empty() { tracker.process_spills(|v| self.spills.contains(&v)); self.spills.clear() } } /// Insert a `copy value` before the current instruction and give it a live range extending to /// the current instruction. /// /// Returns the new local value created. fn insert_copy(&mut self, value: Value, rci: RegClassIndex) -> Value { let copy = self.cur.ins().copy(value); let inst = self.cur.built_inst(); // Update live ranges. self.liveness.create_dead(copy, inst, Affinity::Reg(rci)); self.liveness.extend_locally( copy, self.cur.func.layout.pp_block(inst), self.cur.current_inst().expect("must be at an instruction"), &self.cur.func.layout, ); copy } } /// Struct representing a register use of a value. /// Used to detect multiple uses of the same value with incompatible register constraints. #[derive(Clone, Copy)] struct RegUse { value: Value, opidx: u16, // Register class required by the use. rci: RegClassIndex, // A use with a fixed register constraint. fixed: bool, // A register use of a spilled value. 
spilled: bool, // A use with a tied register constraint *and* the used value is not killed. tied: bool, } impl RegUse { fn new(value: Value, idx: usize, rci: RegClassIndex) -> Self { Self { value, opidx: idx as u16, rci, fixed: false, spilled: false, tied: false, } } } impl fmt::Display for RegUse { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}@op{}", self.value, self.opidx)?; if self.fixed { write!(f, "/fixed")?; } if self.spilled { write!(f, "/spilled")?; } if self.tied { write!(f, "/tied")?; } Ok(()) } }
38.953198
100
0.542953
29c9c5856748af687e317719ea4928ac92e46541
1,509
pub struct IconDataArray { props: crate::Props, } impl yew::Component for IconDataArray { type Properties = crate::Props; type Message = (); fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self { Self { props } } fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender { true } fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender { false } fn view(&self) -> yew::prelude::Html { yew::prelude::html! { <svg class=self.props.class.unwrap_or("") width=self.props.size.unwrap_or(24).to_string() height=self.props.size.unwrap_or(24).to_string() viewBox="0 0 24 24" fill=self.props.fill.unwrap_or("none") stroke=self.props.color.unwrap_or("currentColor") stroke-width=self.props.stroke_width.unwrap_or(2).to_string() stroke-linecap=self.props.stroke_linecap.unwrap_or("round") stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round") > <svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><rect fill="none" height="24" width="24"/></g><g><g><polygon points="15,4 15,6 18,6 18,18 15,18 15,20 20,20 20,4"/><polygon points="4,20 9,20 9,18 6,18 6,6 9,6 9,4 4,4"/></g></g></svg> </svg> } } }
32.804348
316
0.568588
5d32278da5a8b31d7b20bb64d564f7a950b78dc2
2,599
use serde::{Deserialize, Serialize}; use clickhouse::Row; mod common; #[tokio::test] async fn it_writes_then_reads() { let client = common::prepare_database("it_writes_then_reads").await; #[derive(Debug, Row, Serialize, Deserialize)] struct MyRow<'a> { no: u32, name: &'a str, } // Create a table. client .query( " CREATE TABLE some(no UInt32, name LowCardinality(String)) ENGINE = MergeTree ORDER BY no ", ) .execute() .await .unwrap(); // Write to the table. let mut insert = client.insert("some").unwrap(); for i in 0..1000 { insert.write(&MyRow { no: i, name: "foo" }).await.unwrap(); } insert.end().await.unwrap(); // Read from the table. let mut cursor = client .query("SELECT ?fields FROM some WHERE no BETWEEN ? AND ?") .bind(500) .bind(504) .fetch::<MyRow<'_>>() .unwrap(); let mut i = 500; while let Some(row) = cursor.next().await.unwrap() { assert_eq!(row.no, i); assert_eq!(row.name, "foo"); i += 1; } } // See #19. #[tokio::test] async fn it_requests_long_query() { let client = common::prepare_database("it_requests_long_query").await; client .query("CREATE TABLE test(n String) ENGINE = MergeTree ORDER BY n") .execute() .await .unwrap(); let long_string = "A".repeat(100_000); let got_string = client .query("select ?") .bind(&long_string) .fetch_one::<String>() .await .unwrap(); assert_eq!(got_string, long_string); } // See #22. 
#[tokio::test] async fn it_works_with_big_borrowed_str() { let client = common::prepare_database("it_works_with_big_borrowed_str").await; #[derive(Debug, Row, Serialize, Deserialize)] struct MyRow<'a> { no: u32, body: &'a str, } client .query("CREATE TABLE test(no UInt32, body String) ENGINE = MergeTree ORDER BY no") .execute() .await .unwrap(); let long_string = "A".repeat(10000); let mut insert = client.insert("test").unwrap(); insert .write(&MyRow { no: 0, body: &long_string, }) .await .unwrap(); insert.end().await.unwrap(); let mut cursor = client .query("SELECT ?fields FROM test") .fetch::<MyRow<'_>>() .unwrap(); let row = cursor.next().await.unwrap().unwrap(); assert_eq!(row.body, long_string); }
22.6
90
0.551366
72fe3af49f7e1e9075b388b4d0d1db0234f5a748
10,247
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use crate::connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}; use crate::recv_stream::RECV_BUFFER_SIZE; use crate::rtt::GRANULARITY; use crate::stream_id::StreamType; use crate::tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}; use crate::tracking::DEFAULT_ACK_DELAY; use crate::{CongestionControlAlgorithm, QuicVersion, Res}; use std::convert::TryFrom; use std::time::Duration; const LOCAL_MAX_DATA: u64 = 0x3FFF_FFFF_FFFF_FFFF; // 2^62-1 const LOCAL_STREAM_LIMIT_BIDI: u64 = 16; const LOCAL_STREAM_LIMIT_UNI: u64 = 16; /// See `ConnectionParameters.ack_ratio` for a discussion of this value. pub const ACK_RATIO_SCALE: u8 = 10; /// By default, aim to have the peer acknowledge 4 times per round trip time. /// See `ConnectionParameters.ack_ratio` for more. const DEFAULT_ACK_RATIO: u8 = 4 * ACK_RATIO_SCALE; /// The local value for the idle timeout period. const DEFAULT_IDLE_TIMEOUT: Duration = Duration::from_secs(30); /// What to do with preferred addresses. #[derive(Debug, Clone)] pub enum PreferredAddressConfig { /// Disabled, whether for client or server. Disabled, /// Enabled at a client, disabled at a server. Default, /// Enabled at both client and server. Address(PreferredAddress), } /// ConnectionParameters use for setting intitial value for QUIC parameters. /// This collects configuration like initial limits, protocol version, and /// congestion control algorithm. #[derive(Debug, Clone)] pub struct ConnectionParameters { quic_version: QuicVersion, cc_algorithm: CongestionControlAlgorithm, /// Initial connection-level flow control limit. 
max_data: u64, /// Initial flow control limit for receiving data on bidirectional streams that the peer creates. max_stream_data_bidi_remote: u64, /// Initial flow control limit for receiving data on bidirectional streams that this endpoint creates. max_stream_data_bidi_local: u64, /// Initial flow control limit for receiving data on unidirectional streams that the peer creates. max_stream_data_uni: u64, /// Initial limit on bidirectional streams that the peer creates. max_streams_bidi: u64, /// Initial limit on unidirectional streams that this endpoint creates. max_streams_uni: u64, /// The ACK ratio determines how many acknowledgements we will request as a /// fraction of both the current congestion window (expressed in packets) and /// as a fraction of the current round trip time. This value is scaled by /// `ACK_RATIO_SCALE`; that is, if the goal is to have at least five /// acknowledgments every round trip, set the value to `5 * ACK_RATIO_SCALE`. /// Values less than `ACK_RATIO_SCALE` are clamped to `ACK_RATIO_SCALE`. ack_ratio: u8, /// The duration of the idle timeout for the connection. 
idle_timeout: Duration, preferred_address: PreferredAddressConfig, } impl Default for ConnectionParameters { fn default() -> Self { Self { quic_version: QuicVersion::default(), cc_algorithm: CongestionControlAlgorithm::NewReno, max_data: LOCAL_MAX_DATA, max_stream_data_bidi_remote: u64::try_from(RECV_BUFFER_SIZE).unwrap(), max_stream_data_bidi_local: u64::try_from(RECV_BUFFER_SIZE).unwrap(), max_stream_data_uni: u64::try_from(RECV_BUFFER_SIZE).unwrap(), max_streams_bidi: LOCAL_STREAM_LIMIT_BIDI, max_streams_uni: LOCAL_STREAM_LIMIT_UNI, ack_ratio: DEFAULT_ACK_RATIO, idle_timeout: DEFAULT_IDLE_TIMEOUT, preferred_address: PreferredAddressConfig::Default, } } } impl ConnectionParameters { pub fn get_quic_version(&self) -> QuicVersion { self.quic_version } pub fn quic_version(mut self, v: QuicVersion) -> Self { self.quic_version = v; self } pub fn get_cc_algorithm(&self) -> CongestionControlAlgorithm { self.cc_algorithm } pub fn cc_algorithm(mut self, v: CongestionControlAlgorithm) -> Self { self.cc_algorithm = v; self } pub fn get_max_data(&self) -> u64 { self.max_data } pub fn max_data(mut self, v: u64) -> Self { self.max_data = v; self } pub fn get_max_streams(&self, stream_type: StreamType) -> u64 { match stream_type { StreamType::BiDi => self.max_streams_bidi, StreamType::UniDi => self.max_streams_uni, } } /// # Panics /// If v > 2^60 (the maximum allowed by the protocol). pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self { assert!(v <= (1 << 60), "max_streams is too large"); match stream_type { StreamType::BiDi => { self.max_streams_bidi = v; } StreamType::UniDi => { self.max_streams_uni = v; } } self } /// Get the maximum stream data that we will accept on different types of streams. /// # Panics /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination. 
pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 { match (stream_type, remote) { (StreamType::BiDi, false) => self.max_stream_data_bidi_local, (StreamType::BiDi, true) => self.max_stream_data_bidi_remote, (StreamType::UniDi, false) => { panic!("Can't get receive limit on a stream that can only be sent.") } (StreamType::UniDi, true) => self.max_stream_data_uni, } } /// Set the maximum stream data that we will accept on different types of streams. /// # Panics /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination /// or if v >= 62 (the maximum allowed by the protocol). pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self { assert!(v < (1 << 62), "max stream data is too large"); match (stream_type, remote) { (StreamType::BiDi, false) => { self.max_stream_data_bidi_local = v; } (StreamType::BiDi, true) => { self.max_stream_data_bidi_remote = v; } (StreamType::UniDi, false) => { panic!("Can't set receive limit on a stream that can only be sent.") } (StreamType::UniDi, true) => { self.max_stream_data_uni = v; } } self } /// Set a preferred address (which only has an effect for a server). pub fn preferred_address(mut self, preferred: PreferredAddress) -> Self { self.preferred_address = PreferredAddressConfig::Address(preferred); self } /// Disable the use of preferred addresses. pub fn disable_preferred_address(mut self) -> Self { self.preferred_address = PreferredAddressConfig::Disabled; self } pub fn get_preferred_address(&self) -> &PreferredAddressConfig { &self.preferred_address } pub fn ack_ratio(mut self, ack_ratio: u8) -> Self { self.ack_ratio = ack_ratio; self } pub fn get_ack_ratio(&self) -> u8 { self.ack_ratio } /// # Panics /// If `timeout` is 2^62 milliseconds or more. 
pub fn idle_timeout(mut self, timeout: Duration) -> Self { assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long"); self.idle_timeout = timeout; self } pub fn get_idle_timeout(&self) -> Duration { self.idle_timeout } pub fn create_transport_parameter( &self, role: Role, cid_manager: &mut ConnectionIdManager, ) -> Res<TransportParametersHandler> { let mut tps = TransportParametersHandler::default(); // default parameters tps.local.set_integer( tparams::ACTIVE_CONNECTION_ID_LIMIT, u64::try_from(LOCAL_ACTIVE_CID_LIMIT).unwrap(), ); tps.local.set_empty(tparams::DISABLE_MIGRATION); tps.local.set_empty(tparams::GREASE_QUIC_BIT); tps.local.set_integer( tparams::MAX_ACK_DELAY, u64::try_from(DEFAULT_ACK_DELAY.as_millis()).unwrap(), ); tps.local.set_integer( tparams::MIN_ACK_DELAY, u64::try_from(GRANULARITY.as_micros()).unwrap(), ); // set configurable parameters tps.local .set_integer(tparams::INITIAL_MAX_DATA, self.max_data); tps.local.set_integer( tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, self.max_stream_data_bidi_local, ); tps.local.set_integer( tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, self.max_stream_data_bidi_remote, ); tps.local.set_integer( tparams::INITIAL_MAX_STREAM_DATA_UNI, self.max_stream_data_uni, ); tps.local .set_integer(tparams::INITIAL_MAX_STREAMS_BIDI, self.max_streams_bidi); tps.local .set_integer(tparams::INITIAL_MAX_STREAMS_UNI, self.max_streams_uni); tps.local.set_integer( tparams::IDLE_TIMEOUT, u64::try_from(self.idle_timeout.as_millis()).unwrap_or(0), ); if let PreferredAddressConfig::Address(preferred) = &self.preferred_address { if role == Role::Server { let (cid, srt) = cid_manager.preferred_address_cid()?; tps.local.set( tparams::PREFERRED_ADDRESS, TransportParameter::PreferredAddress { v4: preferred.ipv4(), v6: preferred.ipv6(), cid, srt, }, ); } } Ok(tps) } }
37.39781
106
0.636186
8a11eb257abedb58dd47edfac88c00e3677576b9
336
pub mod day1; pub mod day10; pub mod day11; pub mod day12; pub mod day13; pub mod day14; pub mod day15; pub mod day16; pub mod day17; pub mod day18; pub mod day19; pub mod day2; pub mod day20; pub mod day21; pub mod day22; pub mod day23; pub mod day3; pub mod day4; pub mod day5; pub mod day6; pub mod day7; pub mod day8; pub mod day9;
14
14
0.72619
0988f5ded8ab9218723abc437a4401b4b1ee608d
21,196
//! Node _execution contexts_, which manage access to the JavaScript engine at various points in the Node.js runtime lifecycle. pub(crate) mod internal; use std; use std::cell::RefCell; use std::convert::Into; use std::marker::PhantomData; use std::os::raw::c_void; use std::panic::UnwindSafe; use neon_runtime; use neon_runtime::raw; use borrow::{Ref, RefMut, Borrow, BorrowMut}; use borrow::internal::Ledger; use context::internal::Env; use handle::{Managed, Handle}; use types::{JsValue, Value, JsObject, JsArray, JsFunction, JsBoolean, JsNumber, JsString, StringResult, JsNull, JsUndefined}; use types::binary::{JsArrayBuffer, JsBuffer}; use types::error::JsError; use object::{Object, This}; use object::class::Class; use result::{NeonResult, JsResult, Throw}; use self::internal::{ContextInternal, Scope, ScopeMetadata}; #[repr(C)] pub(crate) struct CallbackInfo<'a> { info: raw::FunctionCallbackInfo, _lifetime: PhantomData<&'a raw::FunctionCallbackInfo>, } impl CallbackInfo<'_> { pub fn data(&self, env: Env) -> *mut c_void { unsafe { let mut raw_data: *mut c_void = std::mem::zeroed(); neon_runtime::call::data(env.to_raw(), self.info, &mut raw_data); raw_data } } pub unsafe fn with_cx<T: This, U, F: for<'a> FnOnce(CallContext<'a, T>) -> U>(&self, env: Env, f: F) -> U { CallContext::<T>::with(env, self, f) } pub fn set_return<'a, 'b, T: Value>(&'a self, value: Handle<'b, T>) { unsafe { neon_runtime::call::set_return(self.info, value.to_raw()) } } fn kind(&self) -> CallKind { if unsafe { neon_runtime::call::is_construct(std::mem::transmute(self)) } { CallKind::Construct } else { CallKind::Call } } pub fn len<'b, C: Context<'b>>(&self, cx: &C) -> i32 { unsafe { neon_runtime::call::len(cx.env().to_raw(), self.info) } } #[cfg(feature = "legacy-runtime")] pub fn get<'b, C: Context<'b>>(&self, cx: &mut C, i: i32) -> Option<Handle<'b, JsValue>> { if i < 0 || i >= self.len(cx) { return None; } unsafe { let mut local: raw::Local = std::mem::zeroed(); 
neon_runtime::call::get(cx.env().to_raw(), self.info, i, &mut local); Some(Handle::new_internal(JsValue::from_raw(local))) } } #[cfg(feature = "napi-runtime")] pub fn argv<'b, C: Context<'b>>(&self, cx: &mut C) -> Vec<raw::Local> { unsafe { neon_runtime::call::argv(cx.env().to_raw(), self.info) } } pub fn this<'b, C: Context<'b>>(&self, cx: &mut C) -> raw::Local { let env = cx.env(); unsafe { let mut local: raw::Local = std::mem::zeroed(); neon_runtime::call::this(env.to_raw(), std::mem::transmute(self.info), &mut local); local } } } /// Indicates whether a function call was called with JavaScript's `[[Call]]` or `[[Construct]]` semantics. #[derive(Clone, Copy, Debug)] pub enum CallKind { Construct, Call } /// An RAII implementation of a "scoped lock" of the JS engine. When this structure is dropped (falls out of scope), the engine will be unlocked. /// /// Types of JS values that support the `Borrow` and `BorrowMut` traits can be inspected while the engine is locked by passing a reference to a `Lock` to their methods. pub struct Lock<'a> { pub(crate) ledger: RefCell<Ledger>, pub(crate) env: Env, phantom: PhantomData<&'a ()> } impl<'a> Lock<'a> { fn new(env: Env) -> Self { Lock { ledger: RefCell::new(Ledger::new()), env, phantom: PhantomData, } } } /// An _execution context_, which provides context-sensitive access to the JavaScript engine. Most operations that interact with the engine require passing a reference to a context. /// /// A context has a lifetime `'a`, which ensures the safety of handles managed by the JS garbage collector. All handles created during the lifetime of a context are kept alive for that duration and cannot outlive the context. pub trait Context<'a>: ContextInternal<'a> { /// Lock the JavaScript engine, returning an RAII guard that keeps the lock active as long as the guard is alive. 
/// /// If this is not the currently active context (for example, if it was used to spawn a scoped context with `execute_scoped` or `compute_scoped`), this method will panic. fn lock(&self) -> Lock<'_> { self.check_active(); Lock::new(self.env()) } /// Convenience method for locking the JavaScript engine and borrowing a single JS value's internals. /// /// # Example: /// /// ```no_run /// # use neon::prelude::*; /// # fn my_neon_function(mut cx: FunctionContext) -> JsResult<JsNumber> { /// let b: Handle<JsArrayBuffer> = cx.argument(0)?; /// let x: u32 = cx.borrow(&b, |data| { data.as_slice()[0] }); /// let n: Handle<JsNumber> = cx.number(x); /// # Ok(n) /// # } /// ``` /// /// Note: the borrowed value is required to be a reference to a handle instead of a handle /// as a workaround for a [Rust compiler bug](https://github.com/rust-lang/rust/issues/29997). /// We may be able to generalize this compatibly in the future when the Rust bug is fixed, /// but while the extra `&` is a small ergonomics regression, this API is still a nice /// convenience. fn borrow<'c, V, T, F>(&self, v: &'c Handle<V>, f: F) -> T where V: Value, &'c V: Borrow, F: for<'b> FnOnce(Ref<'b, <&'c V as Borrow>::Target>) -> T { let lock = self.lock(); let contents = v.borrow(&lock); f(contents) } /// Convenience method for locking the JavaScript engine and mutably borrowing a single JS value's internals. /// /// # Example: /// /// ```no_run /// # use neon::prelude::*; /// # fn my_neon_function(mut cx: FunctionContext) -> JsResult<JsUndefined> { /// let mut b: Handle<JsArrayBuffer> = cx.argument(0)?; /// cx.borrow_mut(&mut b, |data| { /// let slice = data.as_mut_slice::<u32>(); /// slice[0] += 1; /// }); /// # Ok(cx.undefined()) /// # } /// ``` /// /// Note: the borrowed value is required to be a reference to a handle instead of a handle /// as a workaround for a [Rust compiler bug](https://github.com/rust-lang/rust/issues/29997). 
/// We may be able to generalize this compatibly in the future when the Rust bug is fixed, /// but while the extra `&mut` is a small ergonomics regression, this API is still a nice /// convenience. fn borrow_mut<'c, V, T, F>(&self, v: &'c mut Handle<V>, f: F) -> T where V: Value, &'c mut V: BorrowMut, F: for<'b> FnOnce(RefMut<'b, <&'c mut V as Borrow>::Target>) -> T { let lock = self.lock(); let contents = v.borrow_mut(&lock); f(contents) } /// Executes a computation in a new memory management scope. /// /// Handles created in the new scope are kept alive only for the duration of the computation and cannot escape. /// /// This method can be useful for limiting the life of temporary values created during long-running computations, to prevent leaks. fn execute_scoped<T, F>(&self, f: F) -> T where F: for<'b> FnOnce(ExecuteContext<'b>) -> T { self.check_active(); self.deactivate(); let result = ExecuteContext::with(f); self.activate(); result } /// Executes a computation in a new memory management scope and computes a single result value that outlives the computation. /// /// Handles created in the new scope are kept alive only for the duration of the computation and cannot escape, with the exception of the result value, which is rooted in the outer context. /// /// This method can be useful for limiting the life of temporary values created during long-running computations, to prevent leaks. 
fn compute_scoped<V, F>(&self, f: F) -> JsResult<'a, V> where V: Value, F: for<'b, 'c> FnOnce(ComputeContext<'b, 'c>) -> JsResult<'b, V> { self.check_active(); self.deactivate(); let result = ComputeContext::with(|cx| { unsafe { let escapable_handle_scope = cx.scope.handle_scope as *mut raw::EscapableHandleScope; let escapee = f(cx)?; let mut result_local: raw::Local = std::mem::zeroed(); neon_runtime::scope::escape(&mut result_local, escapable_handle_scope, escapee.to_raw()); Ok(Handle::new_internal(V::from_raw(result_local))) } }); self.activate(); result } #[cfg(all(feature = "try-catch-api", feature = "napi-runtime"))] fn try_catch<'b: 'a, T, F>(&mut self, f: F) -> Result<Handle<'a, T>, Handle<'a, JsValue>> where T: Value, F: FnOnce(&mut Self) -> JsResult<'b, T> { self.try_catch_internal(f) } #[cfg(all(feature = "try-catch-api", feature = "legacy-runtime"))] fn try_catch<'b: 'a, T, F>(&mut self, f: F) -> Result<Handle<'a, T>, Handle<'a, JsValue>> where T: Value, F: UnwindSafe + FnOnce(&mut Self) -> JsResult<'b, T> { self.try_catch_internal(f) } /// Convenience method for creating a `JsBoolean` value. fn boolean(&mut self, b: bool) -> Handle<'a, JsBoolean> { JsBoolean::new(self, b) } /// Convenience method for creating a `JsNumber` value. fn number<T: Into<f64>>(&mut self, x: T) -> Handle<'a, JsNumber> { JsNumber::new(self, x.into()) } /// Convenience method for creating a `JsString` value. /// /// If the string exceeds the limits of the JS engine, this method panics. fn string<S: AsRef<str>>(&mut self, s: S) -> Handle<'a, JsString> { JsString::new(self, s) } /// Convenience method for creating a `JsString` value. /// /// If the string exceeds the limits of the JS engine, this method returns an `Err` value. fn try_string<S: AsRef<str>>(&mut self, s: S) -> StringResult<'a> { JsString::try_new(self, s) } /// Convenience method for creating a `JsNull` value. 
fn null(&mut self) -> Handle<'a, JsNull> { #[cfg(feature = "legacy-runtime")] return JsNull::new(); #[cfg(feature = "napi-runtime")] return JsNull::new(self); } /// Convenience method for creating a `JsUndefined` value. fn undefined(&mut self) -> Handle<'a, JsUndefined> { #[cfg(feature = "legacy-runtime")] return JsUndefined::new(); #[cfg(feature = "napi-runtime")] return JsUndefined::new(self); } /// Convenience method for creating an empty `JsObject` value. fn empty_object(&mut self) -> Handle<'a, JsObject> { JsObject::new(self) } /// Convenience method for creating an empty `JsArray` value. fn empty_array(&mut self) -> Handle<'a, JsArray> { JsArray::new(self, 0) } /// Convenience method for creating an empty `JsArrayBuffer` value. fn array_buffer(&mut self, size: u32) -> JsResult<'a, JsArrayBuffer> { JsArrayBuffer::new(self, size) } /// Convenience method for creating an empty `JsBuffer` value. fn buffer(&mut self, size: u32) -> JsResult<'a, JsBuffer> { JsBuffer::new(self, size) } /// Produces a handle to the JavaScript global object. fn global(&mut self) -> Handle<'a, JsObject> { JsObject::build(|out| { unsafe { neon_runtime::scope::get_global(self.env().to_raw(), out); } }) } /// Throws a JS value. fn throw<'b, T: Value, U>(&mut self, v: Handle<'b, T>) -> NeonResult<U> { unsafe { neon_runtime::error::throw(self.env().to_raw(), v.to_raw()); } Err(Throw) } /// Creates a direct instance of the [`Error`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error) class. fn error<S: AsRef<str>>(&mut self, msg: S) -> JsResult<'a, JsError> { JsError::error(self, msg) } /// Creates an instance of the [`TypeError`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/TypeError) class. 
fn type_error<S: AsRef<str>>(&mut self, msg: S) -> JsResult<'a, JsError> { JsError::type_error(self, msg) } /// Creates an instance of the [`RangeError`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/RangeError) class. fn range_error<S: AsRef<str>>(&mut self, msg: S) -> JsResult<'a, JsError> { JsError::range_error(self, msg) } /// Throws a direct instance of the [`Error`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error) class. fn throw_error<S: AsRef<str>, T>(&mut self, msg: S) -> NeonResult<T> { let err = JsError::error(self, msg)?; self.throw(err) } /// Throws an instance of the [`TypeError`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/TypeError) class. fn throw_type_error<S: AsRef<str>, T>(&mut self, msg: S) -> NeonResult<T> { let err = JsError::type_error(self, msg)?; self.throw(err) } /// Throws an instance of the [`RangeError`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/RangeError) class. fn throw_range_error<S: AsRef<str>, T>(&mut self, msg: S) -> NeonResult<T> { let err = JsError::range_error(self, msg)?; self.throw(err) } } /// A view of the JS engine in the context of top-level initialization of a Neon module. pub struct ModuleContext<'a> { scope: Scope<'a, raw::HandleScope>, exports: Handle<'a, JsObject> } impl<'a> UnwindSafe for ModuleContext<'a> { } impl<'a> ModuleContext<'a> { pub(crate) fn with<T, F: for<'b> FnOnce(ModuleContext<'b>) -> T>(env: Env, exports: Handle<'a, JsObject>, f: F) -> T { // These assertions ensure the proper amount of space is reserved on the rust stack // This is only necessary in the legacy runtime. 
#[cfg(feature = "legacy-runtime")] { debug_assert!(unsafe { neon_runtime::scope::size() } <= std::mem::size_of::<raw::HandleScope>()); debug_assert!(unsafe { neon_runtime::scope::alignment() } <= std::mem::align_of::<raw::HandleScope>()); } Scope::with(env, |scope| { f(ModuleContext { scope, exports }) }) } /// Convenience method for exporting a Neon function from a module. pub fn export_function<T: Value>(&mut self, key: &str, f: fn(FunctionContext) -> JsResult<T>) -> NeonResult<()> { let value = JsFunction::new(self, f)?.upcast::<JsValue>(); self.exports.set(self, key, value)?; Ok(()) } /// Convenience method for exporting a Neon class constructor from a module. pub fn export_class<T: Class>(&mut self, key: &str) -> NeonResult<()> { let constructor = T::constructor(self)?; self.exports.set(self, key, constructor)?; Ok(()) } /// Exports a JavaScript value from a Neon module. pub fn export_value<T: Value>(&mut self, key: &str, val: Handle<T>) -> NeonResult<()> { self.exports.set(self, key, val)?; Ok(()) } /// Produces a handle to a module's exports object. pub fn exports_object(&mut self) -> JsResult<'a, JsObject> { Ok(self.exports) } } impl<'a> ContextInternal<'a> for ModuleContext<'a> { fn scope_metadata(&self) -> &ScopeMetadata { &self.scope.metadata } } impl<'a> Context<'a> for ModuleContext<'a> { } /// A view of the JS engine in the context of a scoped computation started by `Context::execute_scoped()`. pub struct ExecuteContext<'a> { scope: Scope<'a, raw::HandleScope> } impl<'a> ExecuteContext<'a> { pub(crate) fn with<T, F: for<'b> FnOnce(ExecuteContext<'b>) -> T>(f: F) -> T { let env = Env::current(); Scope::with(env, |scope| { f(ExecuteContext { scope }) }) } } impl<'a> ContextInternal<'a> for ExecuteContext<'a> { fn scope_metadata(&self) -> &ScopeMetadata { &self.scope.metadata } } impl<'a> Context<'a> for ExecuteContext<'a> { } /// A view of the JS engine in the context of a scoped computation started by `Context::compute_scoped()`. 
pub struct ComputeContext<'a, 'outer> { scope: Scope<'a, raw::EscapableHandleScope>, phantom_inner: PhantomData<&'a ()>, phantom_outer: PhantomData<&'outer ()> } impl<'a, 'b> ComputeContext<'a, 'b> { pub(crate) fn with<T, F: for<'c, 'd> FnOnce(ComputeContext<'c, 'd>) -> T>(f: F) -> T { let env = Env::current(); Scope::with(env, |scope| { f(ComputeContext { scope, phantom_inner: PhantomData, phantom_outer: PhantomData }) }) } } impl<'a, 'b> ContextInternal<'a> for ComputeContext<'a, 'b> { fn scope_metadata(&self) -> &ScopeMetadata { &self.scope.metadata } } impl<'a, 'b> Context<'a> for ComputeContext<'a, 'b> { } /// A view of the JS engine in the context of a function call. /// /// The type parameter `T` is the type of the `this`-binding. pub struct CallContext<'a, T: This> { scope: Scope<'a, raw::HandleScope>, info: &'a CallbackInfo<'a>, #[cfg(feature = "napi-runtime")] arguments: Option<Vec<raw::Local>>, phantom_type: PhantomData<T> } impl<'a, T: This> UnwindSafe for CallContext<'a, T> { } impl<'a, T: This> CallContext<'a, T> { /// Indicates whether the function was called via the JavaScript `[[Call]]` or `[[Construct]]` semantics. pub fn kind(&self) -> CallKind { self.info.kind() } pub(crate) fn with<U, F: for<'b> FnOnce(CallContext<'b, T>) -> U>(env: Env, info: &'a CallbackInfo<'a>, f: F) -> U { Scope::with(env, |scope| { f(CallContext { scope, info, #[cfg(feature = "napi-runtime")] arguments: None, phantom_type: PhantomData }) }) } /// Indicates the number of arguments that were passed to the function. pub fn len(&self) -> i32 { self.info.len(self) } /// Produces the `i`th argument, or `None` if `i` is greater than or equal to `self.len()`. 
pub fn argument_opt(&mut self, i: i32) -> Option<Handle<'a, JsValue>> { #[cfg(feature = "legacy-runtime")] { self.info.get(self, i) } #[cfg(feature = "napi-runtime")] { let local = if let Some(arguments) = &self.arguments { arguments.get(i as usize).cloned() } else { let arguments = self.info.argv(self); let local = arguments.get(i as usize).cloned(); self.arguments = Some(arguments); local }; local.map(|local| Handle::new_internal(JsValue::from_raw(local))) } } /// Produces the `i`th argument and casts it to the type `V`, or throws an exception if `i` is greater than or equal to `self.len()` or cannot be cast to `V`. pub fn argument<V: Value>(&mut self, i: i32) -> JsResult<'a, V> { match self.argument_opt(i) { Some(v) => v.downcast_or_throw(self), None => self.throw_type_error("not enough arguments") } } /// Produces a handle to the `this`-binding. pub fn this(&mut self) -> Handle<'a, T> { #[cfg(feature = "legacy-runtime")] let this = T::as_this(self.info.this(self)); #[cfg(feature = "napi-runtime")] let this = T::as_this(self.env(), self.info.this(self)); Handle::new_internal(this) } } impl<'a, T: This> ContextInternal<'a> for CallContext<'a, T> { fn scope_metadata(&self) -> &ScopeMetadata { &self.scope.metadata } } impl<'a, T: This> Context<'a> for CallContext<'a, T> { } /// A shorthand for a `CallContext` with `this`-type `JsObject`. pub type FunctionContext<'a> = CallContext<'a, JsObject>; /// An alias for `CallContext`, useful for indicating that the function is a method of a class. pub type MethodContext<'a, T> = CallContext<'a, T>; /// A view of the JS engine in the context of a task completion callback. pub struct TaskContext<'a> { /// We use an "inherited HandleScope" here because the C++ `neon::Task::complete` /// method sets up and tears down a `HandleScope` for us. 
scope: Scope<'a, raw::InheritedHandleScope> } impl<'a> TaskContext<'a> { pub(crate) fn with<T, F: for<'b> FnOnce(TaskContext<'b>) -> T>(f: F) -> T { let env = Env::current(); Scope::with(env, |scope| { f(TaskContext { scope }) }) } } impl<'a> ContextInternal<'a> for TaskContext<'a> { fn scope_metadata(&self) -> &ScopeMetadata { &self.scope.metadata } } impl<'a> Context<'a> for TaskContext<'a> { }
36.798611
225
0.596575